[ "cursor, f\"PO # : {job.name}-{STYLE}-{SPECIES}\") c.drawString( (PAGE_WIDTH / 2) +", "N/A \", height=INPUT_HEIGHT, width=30, borderWidth=0, # fillColor=([1, 1, 1]), relative=True,", "2 ) self.canv.drawCentredString( self.string_center, 0.25 * self.height, self.dwr_size ) def", "- (LEFT_MARGIN / 2) - 87, borderWidth=0, # fillColor=([1, 1,", "relative=True, ) c.acroForm.textfield( x=LEFT_MARGIN + 430, y=cursor - 4, name=\"slab_front\",", "y=cursor - 4, name=\"out_profile\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH /", "* self.height, self.dr_qty ) self.canv.rect(self.size_box_x, 0, self.size_box_width, self.height) self.string_center +=", "range(0, num_of_entries): try: door_qty, door_size = door_list[i][\"qty\"], door_list[i][\"size\"] except IndexError:", "rail unless otherwise noted.', ) c.restoreState() class OrderEntry(Flowable): \"\"\"Draws table", "self.canv.drawCentredString( self.string_center, 0.25 * self.height, str(self.index) ) self.canv.setFillColorRGB(0, 0, 0)", "Front: Slab:\", ) c.acroForm.textfield( x=LEFT_MARGIN + 350, y=cursor - 4,", "LEFT_MARGIN, cursor, ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Inside Profile", "def myLaterPages(c, doc): cursor = PAGE_HEIGHT - 54 c.saveState() c.setFont(\"Helvetica-Bold\",", "c.setStrokeColorRGB( BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2] ) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.rect( LEFT_MARGIN,", "- 4, name=\"drawer_fronts\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH / 2)", "cursor, f\"Door Style : {STYLE}\") c.setFont(\"Helvetica\", 12) c.drawString( (PAGE_WIDTH /", "= defaultPageSize[1] PAGE_WIDTH = defaultPageSize[0] LEFT_MARGIN = 30 LINE_HEIGHT =", "- LEFT_MARGIN - (LEFT_MARGIN / 2) - 87, borderWidth=0, #", "IndexError: drawer_qty, drawer_size = \"\", \"\" p = OrderEntry( xoffset=-50,", "1]), relative=True, ) c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),", "drawer_size = \"\", \"\" p = OrderEntry( xoffset=-50, dr_qty=door_qty, dr_size=door_size,", "cursor, 170, 20, fill=1) c.setFont(\"Helvetica-Bold\", 12) c.setFillColorRGB(1, 1, 1) string_center", "horizontal center rail unless otherwise noted.', ) c.restoreState() def myLaterPages(c,", "\"Comments : \") cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Wood Type", "import SimpleDocTemplate, Spacer from reportlab.rl_config import defaultPageSize from reportlab.lib.units import", "PAGE_HEIGHT - 60 INPUT_HEIGHT = LINE_HEIGHT - (LINE_HEIGHT * 0.1)", "c.drawString( (PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, \"Delivery", "\", height=INPUT_HEIGHT, width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN /", "c.restoreState() class OrderEntry(Flowable): \"\"\"Draws table entry for each item in", ": 901-853-7568\" ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Panel :", "c.setFont(\"Helvetica\", 12) c.setFillColorRGB(0, 0, 0) c.drawString(LEFT_MARGIN, cursor, f\"Customer : JS", "115 c.drawCentredString(string_center, cursor + 5, \"Width X Height\") string_center +=", "door_style.outside_profile TOTAL_DRS = len(doors) TOTAL_DWRS = len(drawers) def myFirstPage(c, doc):", "relative=True, ) c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor,", "BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.rect(LEFT_MARGIN + 38, cursor, 60, 20, fill=1) c.rect(LEFT_MARGIN", "dr_size self.dwr_qty = dwr_qty self.dwr_size = dwr_size self.index = index", "/ 2) + (LEFT_MARGIN / 2), cursor, f\" 5 PC", "from reportlab.platypus.flowables import Flowable def 
generate_order(job, path, door_style, doors=[], drawers=[]):", "f\"Total Drawer Fronts: {TOTAL_DWRS}\" ) cursor -= 24 c.setStrokeColorRGB(0, 0,", "door_size = door_list[i][\"qty\"], door_list[i][\"size\"] except IndexError: door_qty, door_size = \"\",", "(LEFT_MARGIN / 2), cursor, \"Delivery Date : ASAP\" ) cursor", "f\"Boring not available in arched doors, applied mould doors\", )", "255) CURSOR_HEIGHT = PAGE_HEIGHT - 60 INPUT_HEIGHT = LINE_HEIGHT -", "c.drawString( (PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, f\"", "self.canv.drawCentredString( self.string_center, 0.25 * self.height, self.dwr_size ) def build_pdf(path, name,", "1]), relative=True, ) cursor -= 12 c.setFont(\"Times-Italic\", 10) c.drawString( LEFT_MARGIN,", "\"Doors\") c.drawCentredString(((PAGE_WIDTH / 4) * 3) + 10, cursor, \"Drawer", "door_style.inside_profile OUTSIDE_PROFILE = door_style.outside_profile TOTAL_DRS = len(doors) TOTAL_DWRS = len(drawers)", "/ 2) - 98, # borderWidth=0, # # fillColor=([1, 1,", "# width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2)", "in height will automatically receive a horizontal center rail unless", "c.acroForm.textfield( x=LEFT_MARGIN + 370, y=cursor - 4, name=\"out_profile\", value=\" N/A", "cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Boring For Hinges : No\")", "): Flowable.__init__(self) self.dr_qty = dr_qty self.dr_size = dr_size self.dwr_qty =", "0, 0) self.canv.rect(self.qty_box_x, 0, self.qty_box_width, self.height) self.string_center += (self.idx_box_width /", "self.height) self.string_center += (self.idx_box_width / 2) + (self.qty_box_width / 2)", "+ 68 c.drawCentredString(string_center, cursor + 5, \"Qty\") string_center += 115", "2), cursor, \"Comments : \") cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor,", "80 / 255, 156 / 255) CURSOR_HEIGHT = PAGE_HEIGHT -", "= SimpleDocTemplate(f\"{path}/{name}-{STYLE}.pdf\") Story = [Spacer(1, 3.11 * inch)] num_of_doors =", "PAGE_WIDTH / 2, 40, f\"Page 1 of {job.name}-{STYLE}-{SPECIES}\" ) c.drawCentredString(", "c.drawString(LEFT_MARGIN, cursor, f\"Panel : \") c.acroForm.textfield( x=LEFT_MARGIN + 40, y=cursor", "/ 255, 156 / 255) CURSOR_HEIGHT = PAGE_HEIGHT - 60", "LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Add Hinges : No\") c.drawString( (PAGE_WIDTH /", "index self.height = height self.idx_box_x = xoffset self.idx_box_width = 40", "self.idx_box_x = xoffset self.idx_box_width = 40 self.string_center = xoffset +", "LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Panel : \") c.acroForm.textfield( x=LEFT_MARGIN + 40,", "1, ) Story.append(p) doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages) build_pdf(path, job.name, doors, drawers)", "x=LEFT_MARGIN + 40, y=cursor - 4, name=\"Panel\", value=\" N/A \",", "self.string_center, 0.25 * self.height, str(self.index) ) self.canv.setFillColorRGB(0, 0, 0) self.canv.rect(self.qty_box_x,", "inch)] num_of_doors = len(door_list) num_of_drawers = len(drawer_list) num_of_entries = max(num_of_doors,", "c.drawString(LEFT_MARGIN, cursor, f\"Stile/Rails : \") c.acroForm.textfield( x=LEFT_MARGIN + 62, y=cursor", "c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, f\"Drawer Fronts", "308, cursor, 60, 20, fill=1) c.rect(LEFT_MARGIN + 368, cursor, 170,", "cursor, f\"Order Date : {job.order_date}\", ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN,", "self.size_box_x, 0, self.size_box_width, self.height, ) self.string_center += (self.qty_box_width / 2)", "num_of_drawers = len(drawer_list) num_of_entries = max(num_of_doors, num_of_drawers) for i in", 
"height self.idx_box_x = xoffset self.idx_box_width = 40 self.string_center = xoffset", "{TOTAL_DWRS}\" ) cursor -= 24 c.setStrokeColorRGB(0, 0, 0) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1],", "self.dwr_size ) def build_pdf(path, name, door_list, drawer_list): doc = SimpleDocTemplate(f\"{path}/{name}-{STYLE}.pdf\")", "= len(drawers) def myFirstPage(c, doc): cursor = CURSOR_HEIGHT c.saveState() c.setStrokeColorRGB(", "= 18 BACKGROUND_COLOR = (33 / 255, 80 / 255,", "0.25 * self.height, self.dwr_qty, ) self.canv.rect( self.second_column_offset + self.size_box_x, 0,", "+ 10, cursor, \"Drawer Fronts\") cursor -= 24 c.setStrokeColorRGB(0, 0,", "68 c.drawCentredString(string_center, cursor + 5, \"Qty\") string_center += 115 c.drawCentredString(string_center,", "if self.dwr_qty != \"\" and self.dwr_size != \"\": self.canv.rect( self.second_column_offset", "- LEFT_MARGIN - (LEFT_MARGIN / 2) - 98, # borderWidth=0,", "Fronts\") cursor -= 24 c.setStrokeColorRGB(0, 0, 0) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])", "* self.height, str(self.index) ) self.canv.setFillColorRGB(0, 0, 0) self.canv.rect(self.qty_box_x, 0, self.qty_box_width,", "87, borderWidth=0, # fillColor=([1, 1, 1]), relative=True, ) cursor -=", "0) self.canv.setFillColorRGB( BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2] ) self.canv.rect(self.idx_box_x, 0, self.idx_box_width, self.height,", "will not bore any door with 2\" stiles' ) cursor", "dwr_qty=\"\", dwr_size=\"\", index=0, ): Flowable.__init__(self) self.dr_qty = dr_qty self.dr_size =", "c.restoreState() def myLaterPages(c, doc): cursor = PAGE_HEIGHT - 54 c.saveState()", "import defaultPageSize from reportlab.lib.units import inch from reportlab.platypus.flowables import Flowable", "/ 2), cursor, f\"Outside Profile : \" ) c.acroForm.textfield( x=LEFT_MARGIN", "\"Delivery Date : ASAP\" ) cursor -= LINE_HEIGHT c.setFont(\"Helvetica-Bold\", 12)", "Height\") string_center += 155 c.drawCentredString(string_center, cursor + 5, \"Qty\") string_center", "+ (LEFT_MARGIN / 2), cursor, f\" 5 PC Front: Slab:\",", "- 4, # name=\"inside_profile\", # value=\" N/A \", # height=INPUT_HEIGHT,", "c.acroForm.textfield( x=LEFT_MARGIN + 62, y=cursor - 4, name=\"stiles_rails\", value=\" N/A", "c.saveState() c.setFont(\"Helvetica-Bold\", 14) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.drawCentredString((PAGE_WIDTH / 4) +", "0, 0) c.drawString(LEFT_MARGIN, cursor, f\"Customer : JS Designs Shop, LLC\")", "c.rect(LEFT_MARGIN + 308, cursor, 60, 20, fill=1) c.rect(LEFT_MARGIN + 368,", "10 self.size_box_width = 170 self.second_column_offset = 270 def draw(self): #", "self.qty_box_width, self.height, ) self.string_center += 155 self.canv.drawCentredString( self.string_center, 0.25 *", "PAGE_WIDTH - (LEFT_MARGIN * 2), 24, fill=1 ) c.setFillColorRGB(1, 1,", "cursor -= 24 c.setStrokeColorRGB(0, 0, 0) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.rect(LEFT_MARGIN", "0) c.drawCentredString( PAGE_WIDTH / 2, 40, f\"Page 1 of {job.name}-{STYLE}-{SPECIES}\"", "BACKGROUND_COLOR[1], BACKGROUND_COLOR[2] ) self.canv.rect(self.idx_box_x, 0, self.idx_box_width, self.height, fill=1) self.canv.setFillColorRGB(1, 1,", "LINE_HEIGHT - (LINE_HEIGHT * 0.1) SPECIES = door_style.species STYLE =", "= height self.idx_box_x = xoffset self.idx_box_width = 40 self.string_center =", "doc): cursor = PAGE_HEIGHT - 54 c.saveState() c.setFont(\"Helvetica-Bold\", 14) 
c.setFillColorRGB(BACKGROUND_COLOR[0],", "- LEFT_MARGIN, cursor, ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Stile/Rails", "255, 80 / 255, 156 / 255) CURSOR_HEIGHT = PAGE_HEIGHT", "Type : {SPECIES}\") c.line( (PAGE_WIDTH / 2) + (LEFT_MARGIN /", "dr_size=\"\", dwr_qty=\"\", dwr_size=\"\", index=0, ): Flowable.__init__(self) self.dr_qty = dr_qty self.dr_size", "5, \"Qty\") string_center += 115 c.drawCentredString(string_center, cursor + 5, \"Width", "(LEFT_MARGIN / 2) - 87, borderWidth=0, # fillColor=([1, 1, 1]),", "self.idx_box_width + xoffset self.qty_box_width = 60 self.size_box_x = self.qty_box_width -", "370, y=cursor - 4, name=\"out_profile\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH", "\"Qty\") string_center += 115 c.drawCentredString(string_center, cursor + 5, \"Width X", "name=\"inside_profile\", # value=\" N/A \", # height=INPUT_HEIGHT, # width=(PAGE_WIDTH /", "self.second_column_offset = 270 def draw(self): # Door self.canv.setStrokeColorRGB(0, 0, 0)", "INPUT_HEIGHT = LINE_HEIGHT - (LINE_HEIGHT * 0.1) SPECIES = door_style.species", "60 self.size_box_x = self.qty_box_width - 10 self.size_box_width = 170 self.second_column_offset", "door_style, doors=[], drawers=[]): PAGE_HEIGHT = defaultPageSize[1] PAGE_WIDTH = defaultPageSize[0] LEFT_MARGIN", "dr_size=door_size, dwr_qty=drawer_qty, dwr_size=drawer_size, index=i + 1, ) Story.append(p) doc.build(Story, onFirstPage=myFirstPage,", "12) c.drawString( (PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor,", ") self.string_center += 155 self.canv.drawCentredString( self.string_center, 0.25 * self.height, self.dwr_qty,", "{job.name}-{STYLE}-{SPECIES}\" ) c.drawCentredString( PAGE_WIDTH / 2, 20, 'Reminder : Any", "0.25 * self.height, self.dwr_size ) def build_pdf(path, name, door_list, drawer_list):", "self.string_center += 155 self.canv.drawCentredString( self.string_center, 0.25 * self.height, self.dwr_qty, )", ": ASAP\" ) cursor -= LINE_HEIGHT c.setFont(\"Helvetica-Bold\", 12) c.drawString(LEFT_MARGIN, cursor,", "- 92, borderWidth=0, # fillColor=([1, 1, 1]), relative=True, ) c.setFont(\"Helvetica\",", "/ 2) + (LEFT_MARGIN / 2), cursor, f\"Drawer Fronts :", "cursor, f\"Wood Type : {SPECIES}\") c.line( (PAGE_WIDTH / 2) +", "self, xoffset=0, height=20, dr_qty=\"\", dr_size=\"\", dwr_qty=\"\", dwr_size=\"\", index=0, ): Flowable.__init__(self)", "0, self.size_box_width, self.height) self.string_center += (self.qty_box_width / 2) + (self.size_box_width", "1, 1) c.setFont(\"Helvetica-Bold\", 16) c.drawCentredString(PAGE_WIDTH / 2.0, PAGE_HEIGHT - 34,", "- 87, borderWidth=0, # fillColor=([1, 1, 1]), relative=True, ) cursor", "0) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.rect(LEFT_MARGIN + 38, cursor, 60, 20,", "unless otherwise noted.', ) c.restoreState() def myLaterPages(c, doc): cursor =", "2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 108, #", "height=INPUT_HEIGHT, width=30, borderWidth=0, # fillColor=([1, 1, 1]), relative=True, ) c.acroForm.textfield(", "170, 20, fill=1) c.rect(LEFT_MARGIN + 308, cursor, 60, 20, fill=1)", "2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 87, borderWidth=0,", "fillColor=([1, 1, 1]), relative=True, ) c.acroForm.textfield( x=LEFT_MARGIN + 430, y=cursor", "18 BACKGROUND_COLOR = (33 / 255, 80 / 255, 156", "self.dwr_size != \"\": self.canv.rect( self.second_column_offset + self.qty_box_x, 0, self.qty_box_width, self.height,", "Designs Shop, LLC\") c.drawString( (PAGE_WIDTH / 2) + (LEFT_MARGIN /", "1, 1]), relative=True, ) c.acroForm.textfield( x=LEFT_MARGIN + 430, 
y=cursor -", "BACKGROUND_COLOR[2]) c.drawCentredString((PAGE_WIDTH / 4) + 30, cursor, f\"Total Doors: {TOTAL_DRS}\")", "+ self.qty_box_x, 0, self.qty_box_width, self.height, ) self.string_center += 155 self.canv.drawCentredString(", "= max(num_of_doors, num_of_drawers) for i in range(0, num_of_entries): try: door_qty,", "door_style.name INSIDE_PROFILE = door_style.inside_profile OUTSIDE_PROFILE = door_style.outside_profile TOTAL_DRS = len(doors)", "noted.', ) c.restoreState() class OrderEntry(Flowable): \"\"\"Draws table entry for each", "350, y=cursor - 4, name=\"5_pc_front\", value=\" N/A \", height=INPUT_HEIGHT, width=30,", "155 c.drawCentredString(string_center, cursor + 5, \"Qty\") string_center += 115 c.drawCentredString(string_center,", "(LEFT_MARGIN / 2) - 98, # borderWidth=0, # # fillColor=([1,", "cursor + 5, \"Width X Height\") string_center += 155 c.drawCentredString(string_center,", "dwr_qty self.dwr_size = dwr_size self.index = index self.height = height", "and self.dwr_size != \"\": self.canv.rect( self.second_column_offset + self.qty_box_x, 0, self.qty_box_width,", "= xoffset + (self.idx_box_width / 2) self.qty_box_x = self.idx_box_width +", "cursor, ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Stile/Rails : \")", "156 / 255) CURSOR_HEIGHT = PAGE_HEIGHT - 60 INPUT_HEIGHT =", "+ (LEFT_MARGIN / 2), cursor, f\"Drawer Fronts : \") c.acroForm.textfield(", "0.25 * self.height, self.dr_qty ) self.canv.rect(self.size_box_x, 0, self.size_box_width, self.height) self.string_center", "-= 12 c.setFont(\"Times-Italic\", 10) c.drawString( LEFT_MARGIN, cursor, f\"Boring not available", "f\"Outside Profile : {OUTSIDE_PROFILE}\") # c.acroForm.textfield( # x=LEFT_MARGIN + 88,", "(self.idx_box_width / 2) + (self.qty_box_width / 2) self.canv.drawCentredString( self.string_center, 0.25", "- 40, PAGE_WIDTH - (LEFT_MARGIN * 2), 24, fill=1 )", "24 c.setStrokeColorRGB(0, 0, 0) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.rect(LEFT_MARGIN + 38,", "LEFT_MARGIN, cursor, ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Outside Profile", "c.setFillColorRGB(0, 0, 0) c.drawString(LEFT_MARGIN, cursor, f\"Customer : JS Designs Shop,", "x=LEFT_MARGIN + 370, y=cursor - 4, name=\"out_profile\", value=\" N/A \",", "self.qty_box_x, 0, self.qty_box_width, self.height, ) self.string_center += 155 self.canv.drawCentredString( self.string_center,", "# Drawer if self.dwr_qty != \"\" and self.dwr_size != \"\":", "Flowable def generate_order(job, path, door_style, doors=[], drawers=[]): PAGE_HEIGHT = defaultPageSize[1]", "5 PC Front: Slab:\", ) c.acroForm.textfield( x=LEFT_MARGIN + 350, y=cursor", "PAGE_HEIGHT - 40, PAGE_WIDTH - (LEFT_MARGIN * 2), 24, fill=1", "10) c.drawString( LEFT_MARGIN, cursor, f\"Boring not available in arched doors,", "40, f\"Page 1 of {job.name}-{STYLE}-{SPECIES}\" ) c.drawCentredString( PAGE_WIDTH / 2,", "+ (LEFT_MARGIN / 2), cursor, f\"Outside Profile : \" )", "- LEFT_MARGIN, cursor, ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Inside", "def draw(self): # Door self.canv.setStrokeColorRGB(0, 0, 0) self.canv.setFillColorRGB( BACKGROUND_COLOR[0], BACKGROUND_COLOR[1],", "LEFT_MARGIN + 68 c.drawCentredString(string_center, cursor + 5, \"Qty\") string_center +=", "+ (self.size_box_width / 2) self.canv.drawCentredString( self.string_center, 0.25 * self.height, self.dr_size", "\"\"\"Draws table entry for each item in list of door", "cursor, f\"and raised bead profile mitered doors\", ) cursor -=", "1, 1]), relative=True, 
) c.setFont(\"Helvetica\", 12) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN,", "f\"Page {doc.page} of {job.name}-{STYLE}-{SPECIES}\" ) c.drawCentredString( PAGE_WIDTH / 2, 20,", "c.drawString(LEFT_MARGIN, cursor, f\"Inside Profile : {INSIDE_PROFILE}\") # c.acroForm.textfield( # x=LEFT_MARGIN", "IndexError: door_qty, door_size = \"\", \"\" try: drawer_qty, drawer_size =", "# fillColor=([1, 1, 1]), relative=True, ) c.drawString((PAGE_WIDTH / 2) +", "dwr_size=\"\", index=0, ): Flowable.__init__(self) self.dr_qty = dr_qty self.dr_size = dr_size", "24, fill=1 ) c.setFillColorRGB(1, 1, 1) c.setFont(\"Helvetica-Bold\", 16) c.drawCentredString(PAGE_WIDTH /", "c.setFillColorRGB(1, 1, 1) c.setFont(\"Helvetica-Bold\", 16) c.drawCentredString(PAGE_WIDTH / 2.0, PAGE_HEIGHT -", "= len(doors) TOTAL_DWRS = len(drawers) def myFirstPage(c, doc): cursor =", "f\"PO # : {job.name}-{STYLE}-{SPECIES}\") c.drawString( (PAGE_WIDTH / 2) + (LEFT_MARGIN", "fill=1 ) c.setFillColorRGB(1, 1, 1) c.setFont(\"Helvetica-Bold\", 16) c.drawCentredString(PAGE_WIDTH / 2.0,", "12) c.setFillColorRGB(0, 0, 0) c.drawString(LEFT_MARGIN, cursor, f\"Customer : JS Designs", "2) - 60, borderWidth=0, # fillColor=([1, 1, 1]), relative=True, )", "2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 98, #", "raised bead profile mitered doors\", ) cursor -= 14 c.setFont(\"Times-BoldItalic\",", "self.height, self.dwr_size ) def build_pdf(path, name, door_list, drawer_list): doc =", "y=cursor - 4, # name=\"inside_profile\", # value=\" N/A \", #", "2), cursor, f\"Outside Profile : \" ) c.acroForm.textfield( x=LEFT_MARGIN +", "arched doors, applied mould doors\", ) cursor -= 10 c.drawString(", "f\"Customer : JS Designs Shop, LLC\") c.drawString( (PAGE_WIDTH / 2)", "index=i + 1, ) Story.append(p) doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages) build_pdf(path, job.name,", "inch from reportlab.platypus.flowables import Flowable def generate_order(job, path, door_style, doors=[],", "108, # borderWidth=0, # # fillColor=([1, 1, 1]), # relative=True,", "1) c.setFont(\"Helvetica-Bold\", 16) c.drawCentredString(PAGE_WIDTH / 2.0, PAGE_HEIGHT - 34, \"DOOR", "cursor, 60, 20, fill=1) c.rect(LEFT_MARGIN + 98, cursor, 170, 20,", "9) c.setFillColorRGB(0, 0, 0) c.drawCentredString( PAGE_WIDTH / 2, 40, f\"Page", "self.qty_box_width = 60 self.size_box_x = self.qty_box_width - 10 self.size_box_width =", "+ (LEFT_MARGIN / 2), cursor, PAGE_WIDTH - LEFT_MARGIN, cursor, )", "fillColor=([1, 1, 1]), relative=True, ) c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN", "60, borderWidth=0, # fillColor=([1, 1, 1]), relative=True, ) c.drawString((PAGE_WIDTH /", "fillColor=([1, 1, 1]), relative=True, ) c.setFont(\"Helvetica\", 12) cursor -= LINE_HEIGHT", "\"Drawer Fronts\") cursor -= 24 c.setStrokeColorRGB(0, 0, 0) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1],", "self.size_box_width = 170 self.second_column_offset = 270 def draw(self): # Door", "c.setStrokeColorRGB(0, 0, 0) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.rect(LEFT_MARGIN + 38, cursor,", "OUTSIDE_PROFILE = door_style.outside_profile TOTAL_DRS = len(doors) TOTAL_DWRS = len(drawers) def", "door_list[i][\"size\"] except IndexError: door_qty, door_size = \"\", \"\" try: drawer_qty,", "+ (self.idx_box_width / 2) self.qty_box_x = self.idx_box_width + xoffset self.qty_box_width", "fill=1) self.canv.setFillColorRGB(1, 1, 1) self.canv.setFont(\"Helvetica\", 12) self.canv.drawCentredString( self.string_center, 0.25 *", "\"Width X Height\") c.setFont(\"Helvetica\", 9) 
c.setFillColorRGB(0, 0, 0) c.drawCentredString( PAGE_WIDTH", "+= (self.qty_box_width / 2) + (self.size_box_width / 2) self.canv.drawCentredString( self.string_center,", "LEFT_MARGIN, PAGE_HEIGHT - 40, PAGE_WIDTH - (LEFT_MARGIN * 2), 24,", "cursor, f\"Customer : JS Designs Shop, LLC\") c.drawString( (PAGE_WIDTH /", "\" ) c.acroForm.textfield( x=LEFT_MARGIN + 370, y=cursor - 4, name=\"out_profile\",", "(LEFT_MARGIN / 2) - 60, borderWidth=0, # fillColor=([1, 1, 1]),", "height=INPUT_HEIGHT, # width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN /", "INSIDE_PROFILE = door_style.inside_profile OUTSIDE_PROFILE = door_style.outside_profile TOTAL_DRS = len(doors) TOTAL_DWRS", "/ 2) - 108, # borderWidth=0, # # fillColor=([1, 1,", "f\"Door Style : {STYLE}\") c.setFont(\"Helvetica\", 12) c.drawString( (PAGE_WIDTH / 2)", "- LEFT_MARGIN - (LEFT_MARGIN / 2) - 92, borderWidth=0, #", "f'Cullman will not bore any door with 2\" stiles' )", "self.idx_box_width, self.height, fill=1) self.canv.setFillColorRGB(1, 1, 1) self.canv.setFont(\"Helvetica\", 12) self.canv.drawCentredString( self.string_center,", "\"\" try: drawer_qty, drawer_size = drawer_list[i][\"qty\"], drawer_list[i][\"size\"] except IndexError: drawer_qty,", "/ 2) + (LEFT_MARGIN / 2), cursor, \"Comments : \")", "stiles' ) cursor -= 20 c.setFont(\"Helvetica-Bold\", 14) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])", "1]), relative=True, ) c.setFont(\"Helvetica\", 12) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor,", "self.height, fill=1) self.canv.setFillColorRGB(1, 1, 1) self.canv.setFont(\"Helvetica\", 12) self.canv.drawCentredString( self.string_center, 0.25", "self.canv.setFillColorRGB(1, 1, 1) self.canv.setFont(\"Helvetica\", 12) self.canv.drawCentredString( self.string_center, 0.25 * self.height,", "mould doors\", ) cursor -= 10 c.drawString( LEFT_MARGIN, cursor, f\"and", ": \") cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Wood Type :", "myFirstPage(c, doc): cursor = CURSOR_HEIGHT c.saveState() c.setStrokeColorRGB( BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]", "/ 2) self.canv.drawCentredString( self.string_center, 0.25 * self.height, self.dr_size ) #", "drawer_qty, drawer_size = \"\", \"\" p = OrderEntry( xoffset=-50, dr_qty=door_qty,", "f\"Boring For Hinges : No\") c.drawString( (PAGE_WIDTH / 2) +", "self.canv.rect(self.size_box_x, 0, self.size_box_width, self.height) self.string_center += (self.qty_box_width / 2) +", "ASAP\" ) cursor -= LINE_HEIGHT c.setFont(\"Helvetica-Bold\", 12) c.drawString(LEFT_MARGIN, cursor, f\"Door", "c.drawCentredString( PAGE_WIDTH / 2, 40, f\"Page {doc.page} of {job.name}-{STYLE}-{SPECIES}\" )", "fill=1) c.setFont(\"Helvetica-Bold\", 12) c.setFillColorRGB(1, 1, 1) string_center = LEFT_MARGIN +", "+ 88, # y=cursor - 4, # name=\"outside_profile\", # value=\"", "12) c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, f\"Drawer", "cursor + 5, \"Qty\") string_center += 115 c.drawCentredString(string_center, cursor +", "= door_style.outside_profile TOTAL_DRS = len(doors) TOTAL_DWRS = len(drawers) def myFirstPage(c,", "2) + (LEFT_MARGIN / 2), cursor, f\"Order Date : {job.order_date}\",", "of door sizes.\"\"\" def __init__( self, xoffset=0, height=20, dr_qty=\"\", dr_size=\"\",", "self.height, self.dr_qty ) self.canv.rect(self.size_box_x, 0, self.size_box_width, self.height) self.string_center += (self.qty_box_width", "drawers=[]): PAGE_HEIGHT = defaultPageSize[1] PAGE_WIDTH = defaultPageSize[0] LEFT_MARGIN = 30", "not bore any door with 2\" 
stiles' ) cursor -=", "self.qty_box_width, self.height) self.string_center += (self.idx_box_width / 2) + (self.qty_box_width /", "profile mitered doors\", ) cursor -= 14 c.setFont(\"Times-BoldItalic\", 12) c.drawString(", "f\"Add Hinges : No\") c.drawString( (PAGE_WIDTH / 2) + (LEFT_MARGIN", "c.drawString(LEFT_MARGIN, cursor, f\"PO # : {job.name}-{STYLE}-{SPECIES}\") c.drawString( (PAGE_WIDTH / 2)", "= dr_qty self.dr_size = dr_size self.dwr_qty = dwr_qty self.dwr_size =", "and over in height will automatically receive a horizontal center", ": {OUTSIDE_PROFILE}\") # c.acroForm.textfield( # x=LEFT_MARGIN + 88, # y=cursor", "doc): cursor = CURSOR_HEIGHT c.saveState() c.setStrokeColorRGB( BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2] )", "c.drawString(LEFT_MARGIN, cursor, f\"Add Hinges : No\") c.drawString( (PAGE_WIDTH / 2)", ": \") c.acroForm.textfield( x=LEFT_MARGIN + 375, y=cursor - 4, name=\"drawer_fronts\",", "+= (self.qty_box_width / 2) + ( self.size_box_width / 2 )", "self.dr_qty ) self.canv.rect(self.size_box_x, 0, self.size_box_width, self.height) self.string_center += (self.qty_box_width /", "2, 20, 'Reminder : Any doors 46\" and over in", "self.canv.setStrokeColorRGB(0, 0, 0) self.canv.setFillColorRGB( BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2] ) self.canv.rect(self.idx_box_x, 0,", "(LEFT_MARGIN / 2), cursor, PAGE_WIDTH - LEFT_MARGIN, cursor, ) cursor", "(self.size_box_width / 2) self.canv.drawCentredString( self.string_center, 0.25 * self.height, self.dr_size )", "c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, \"Comments :", "door_list, drawer_list): doc = SimpleDocTemplate(f\"{path}/{name}-{STYLE}.pdf\") Story = [Spacer(1, 3.11 *", "= xoffset self.idx_box_width = 40 self.string_center = xoffset + (self.idx_box_width", "LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"PO # : {job.name}-{STYLE}-{SPECIES}\") c.drawString( (PAGE_WIDTH /", "430, y=cursor - 4, name=\"slab_front\", value=\" N/A \", height=INPUT_HEIGHT, width=30,", "c.setFont(\"Times-Italic\", 10) c.drawString( LEFT_MARGIN, cursor, f\"Boring not available in arched", "98, cursor, 170, 20, fill=1) c.rect(LEFT_MARGIN + 308, cursor, 60,", "cursor + 5, \"Width X Height\") c.setFont(\"Helvetica\", 9) c.setFillColorRGB(0, 0,", "LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Inside Profile : {INSIDE_PROFILE}\") # c.acroForm.textfield( #", "- 4, name=\"Panel\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH / 2)", "LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Stile/Rails : \") c.acroForm.textfield( x=LEFT_MARGIN + 62,", "relative=True, ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Add Hinges :", "doors 46\" and over in height will automatically receive a", "+ (LEFT_MARGIN / 2), cursor, \"Delivery Date : ASAP\" )", "4, # name=\"inside_profile\", # value=\" N/A \", # height=INPUT_HEIGHT, #", "LEFT_MARGIN - (LEFT_MARGIN / 2) - 92, borderWidth=0, # fillColor=([1,", "import Flowable def generate_order(job, path, door_style, doors=[], drawers=[]): PAGE_HEIGHT =", "x=LEFT_MARGIN + 350, y=cursor - 4, name=\"5_pc_front\", value=\" N/A \",", "/ 2) + (LEFT_MARGIN / 2), cursor, \"Phone : 901-853-7568\"", "/ 2) self.qty_box_x = self.idx_box_width + xoffset self.qty_box_width = 60", "* 0.1) SPECIES = door_style.species STYLE = door_style.name INSIDE_PROFILE =", "40, PAGE_WIDTH - (LEFT_MARGIN * 2), 24, fill=1 ) c.setFillColorRGB(1,", "list of door sizes.\"\"\" def __init__( self, xoffset=0, height=20, dr_qty=\"\",", "c.acroForm.textfield( # x=LEFT_MARGIN + 88, # 
y=cursor - 4, #", "40, f\"Page {doc.page} of {job.name}-{STYLE}-{SPECIES}\" ) c.drawCentredString( PAGE_WIDTH / 2,", "3) + 10, cursor, \"Drawer Fronts\") cursor -= 24 c.setStrokeColorRGB(0,", "4, name=\"stiles_rails\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH / 2) -", ") cursor -= 10 c.drawString( LEFT_MARGIN, cursor, f\"and raised bead", "y=cursor - 4, name=\"drawer_fronts\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH /", "LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Boring For Hinges : No\") c.drawString( (PAGE_WIDTH", "+ 62, y=cursor - 4, name=\"stiles_rails\", value=\" N/A \", height=INPUT_HEIGHT,", ") c.setFont(\"Helvetica\", 12) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Boring For", "c.drawCentredString(string_center, cursor + 5, \"Qty\") string_center += 115 c.drawCentredString(string_center, cursor", "x=LEFT_MARGIN + 375, y=cursor - 4, name=\"drawer_fronts\", value=\" N/A \",", "c.setFont(\"Helvetica-Bold\", 16) c.drawCentredString(PAGE_WIDTH / 2.0, PAGE_HEIGHT - 34, \"DOOR ORDER", "-= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Wood Type : {SPECIES}\") c.line( (PAGE_WIDTH", "+ self.size_box_x, 0, self.size_box_width, self.height, ) self.string_center += (self.qty_box_width /", "# y=cursor - 4, # name=\"inside_profile\", # value=\" N/A \",", "x=LEFT_MARGIN + 78, # y=cursor - 4, # name=\"inside_profile\", #", "LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Wood Type : {SPECIES}\") c.line( (PAGE_WIDTH /", "1 of {job.name}-{STYLE}-{SPECIES}\" ) c.drawCentredString( PAGE_WIDTH / 2, 20, 'Reminder", ") self.canv.setFillColorRGB(0, 0, 0) self.canv.rect(self.qty_box_x, 0, self.qty_box_width, self.height) self.string_center +=", "= door_style.name INSIDE_PROFILE = door_style.inside_profile OUTSIDE_PROFILE = door_style.outside_profile TOTAL_DRS =", "from reportlab.lib.units import inch from reportlab.platypus import SimpleDocTemplate, Spacer from", "-= 10 c.drawString( LEFT_MARGIN, cursor, f\"and raised bead profile mitered", "115 c.drawCentredString(string_center, cursor + 5, \"Width X Height\") c.setFont(\"Helvetica\", 9)", "height=INPUT_HEIGHT, width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2)", "Fronts: {TOTAL_DWRS}\" ) cursor -= 24 c.setStrokeColorRGB(0, 0, 0) c.setFillColorRGB(BACKGROUND_COLOR[0],", "from reportlab.lib.units import inch from reportlab.platypus.flowables import Flowable def generate_order(job,", "/ 2) + (LEFT_MARGIN / 2), cursor, f\"Order Date :", "doors, applied mould doors\", ) cursor -= 10 c.drawString( LEFT_MARGIN,", "c.drawCentredString( ((PAGE_WIDTH / 4) * 3) + 10, cursor, f\"Total", ") c.acroForm.textfield( x=LEFT_MARGIN + 350, y=cursor - 4, name=\"5_pc_front\", value=\"", "12 c.setFont(\"Times-Italic\", 10) c.drawString( LEFT_MARGIN, cursor, f\"Boring not available in", "/ 4) * 3) + 10, cursor, \"Drawer Fronts\") cursor", "\") c.acroForm.textfield( x=LEFT_MARGIN + 40, y=cursor - 4, name=\"Panel\", value=\"", "1, 1) self.canv.setFont(\"Helvetica\", 12) self.canv.drawCentredString( self.string_center, 0.25 * self.height, str(self.index)", "Any doors 46\" and over in height will automatically receive", "!= \"\": self.canv.rect( self.second_column_offset + self.qty_box_x, 0, self.qty_box_width, self.height, )", "self.canv.setFillColorRGB(0, 0, 0) self.canv.rect(self.qty_box_x, 0, self.qty_box_width, self.height) self.string_center += (self.idx_box_width", "cursor -= LINE_HEIGHT c.setFont(\"Helvetica-Bold\", 12) c.drawString(LEFT_MARGIN, cursor, f\"Door Style :", "{SPECIES}\") c.line( (PAGE_WIDTH / 2) + 
(LEFT_MARGIN / 2), cursor,", "4, name=\"out_profile\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH / 2) -", ") cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Add Hinges : No\")", "self.string_center, 0.25 * self.height, self.dr_qty ) self.canv.rect(self.size_box_x, 0, self.size_box_width, self.height)", "\"\" and self.dwr_size != \"\": self.canv.rect( self.second_column_offset + self.qty_box_x, 0,", "4) * 3) + 10, cursor, f\"Total Drawer Fronts: {TOTAL_DWRS}\"", "155 self.canv.drawCentredString( self.string_center, 0.25 * self.height, self.dwr_qty, ) self.canv.rect( self.second_column_offset", "OrderEntry( xoffset=-50, dr_qty=door_qty, dr_size=door_size, dwr_qty=drawer_qty, dwr_size=drawer_size, index=i + 1, )", "= drawer_list[i][\"qty\"], drawer_list[i][\"size\"] except IndexError: drawer_qty, drawer_size = \"\", \"\"", "doors=[], drawers=[]): PAGE_HEIGHT = defaultPageSize[1] PAGE_WIDTH = defaultPageSize[0] LEFT_MARGIN =", "40 self.string_center = xoffset + (self.idx_box_width / 2) self.qty_box_x =", "+ xoffset self.qty_box_width = 60 self.size_box_x = self.qty_box_width - 10", "\", height=INPUT_HEIGHT, width=30, borderWidth=0, # fillColor=([1, 1, 1]), relative=True, )", "c.setFont(\"Helvetica-Bold\", 14) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.drawCentredString((PAGE_WIDTH / 4) + 30,", "y=cursor - 4, name=\"stiles_rails\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH /", "a horizontal center rail unless otherwise noted.', ) c.restoreState() def", "= len(door_list) num_of_drawers = len(drawer_list) num_of_entries = max(num_of_doors, num_of_drawers) for", "# fillColor=([1, 1, 1]), relative=True, ) c.acroForm.textfield( x=LEFT_MARGIN + 430,", "self.dr_size ) # Drawer if self.dwr_qty != \"\" and self.dwr_size", "901-853-7568\" ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Panel : \")", "string_center += 115 c.drawCentredString(string_center, cursor + 5, \"Width X Height\")", "BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.drawCentredString((PAGE_WIDTH / 4) + 30, cursor, \"Doors\") c.drawCentredString(((PAGE_WIDTH", "- 4, name=\"5_pc_front\", value=\" N/A \", height=INPUT_HEIGHT, width=30, borderWidth=0, #", "defaultPageSize from reportlab.lib.units import inch from reportlab.platypus.flowables import Flowable def", "y=cursor - 4, name=\"5_pc_front\", value=\" N/A \", height=INPUT_HEIGHT, width=30, borderWidth=0,", "0, 0) c.drawCentredString( PAGE_WIDTH / 2, 40, f\"Page {doc.page} of", "Drawer Fronts: {TOTAL_DWRS}\" ) cursor -= 24 c.setStrokeColorRGB(0, 0, 0)", "dr_qty self.dr_size = dr_size self.dwr_qty = dwr_qty self.dwr_size = dwr_size", "xoffset + (self.idx_box_width / 2) self.qty_box_x = self.idx_box_width + xoffset", "= PAGE_HEIGHT - 60 INPUT_HEIGHT = LINE_HEIGHT - (LINE_HEIGHT *", "# name=\"outside_profile\", # value=\" N/A \", # height=INPUT_HEIGHT, # width=(PAGE_WIDTH", ": Any doors 46\" and over in height will automatically", "= \"\", \"\" p = OrderEntry( xoffset=-50, dr_qty=door_qty, dr_size=door_size, dwr_qty=drawer_qty,", "c.drawCentredString(string_center, cursor + 5, \"Width X Height\") string_center += 155", "\") c.acroForm.textfield( x=LEFT_MARGIN + 375, y=cursor - 4, name=\"drawer_fronts\", value=\"", "f\"Total Doors: {TOTAL_DRS}\") c.drawCentredString( ((PAGE_WIDTH / 4) * 3) +", "X Height\") c.setFont(\"Helvetica\", 9) c.setFillColorRGB(0, 0, 0) c.drawCentredString( PAGE_WIDTH /", "SimpleDocTemplate, Spacer from reportlab.rl_config import defaultPageSize from reportlab.lib.units import 
inch", "center rail unless otherwise noted.', ) c.restoreState() def myLaterPages(c, doc):", "self.canv.rect( self.second_column_offset + self.qty_box_x, 0, self.qty_box_width, self.height, ) self.string_center +=", "1, 1]), relative=True, ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Add", "40, y=cursor - 4, name=\"Panel\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH", "(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, f\"Order Date", "cursor, \"Phone : 901-853-7568\" ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor,", "(LEFT_MARGIN / 2), cursor, f\"Outside Profile : \" ) c.acroForm.textfield(", "xoffset self.idx_box_width = 40 self.string_center = xoffset + (self.idx_box_width /", "self.qty_box_width - 10 self.size_box_width = 170 self.second_column_offset = 270 def", "/ 2) self.canv.drawCentredString( self.string_center, 0.25 * self.height, self.dr_qty ) self.canv.rect(self.size_box_x,", "0.25 * self.height, self.dr_size ) # Drawer if self.dwr_qty !=", "cursor, PAGE_WIDTH - LEFT_MARGIN, cursor, ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN,", "LEFT_MARGIN - (LEFT_MARGIN / 2) - 87, borderWidth=0, # fillColor=([1,", ") c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.rect( LEFT_MARGIN, PAGE_HEIGHT - 40, PAGE_WIDTH", "self.height) self.string_center += (self.qty_box_width / 2) + (self.size_box_width / 2)", "f\"Page 1 of {job.name}-{STYLE}-{SPECIES}\" ) c.drawCentredString( PAGE_WIDTH / 2, 20,", "\"\": self.canv.rect( self.second_column_offset + self.qty_box_x, 0, self.qty_box_width, self.height, ) self.string_center", "+ 98, cursor, 170, 20, fill=1) c.rect(LEFT_MARGIN + 308, cursor,", "# fillColor=([1, 1, 1]), relative=True, ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN,", "0, 0) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.rect(LEFT_MARGIN + 38, cursor, 60,", ") cursor -= 12 c.setFont(\"Times-Italic\", 10) c.drawString( LEFT_MARGIN, cursor, f\"Boring", "12) c.setFillColorRGB(1, 1, 1) string_center = LEFT_MARGIN + 68 c.drawCentredString(string_center,", "c.setFillColorRGB(0, 0, 0) c.drawCentredString( PAGE_WIDTH / 2, 40, f\"Page {doc.page}", "Shop, LLC\") c.drawString( (PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),", "xoffset self.qty_box_width = 60 self.size_box_x = self.qty_box_width - 10 self.size_box_width", "/ 255) CURSOR_HEIGHT = PAGE_HEIGHT - 60 INPUT_HEIGHT = LINE_HEIGHT", ": {INSIDE_PROFILE}\") # c.acroForm.textfield( # x=LEFT_MARGIN + 78, # y=cursor", "-= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Boring For Hinges : No\") c.drawString(", "any door with 2\" stiles' ) cursor -= 20 c.setFont(\"Helvetica-Bold\",", "= \"\", \"\" try: drawer_qty, drawer_size = drawer_list[i][\"qty\"], drawer_list[i][\"size\"] except", ") c.acroForm.textfield( x=LEFT_MARGIN + 370, y=cursor - 4, name=\"out_profile\", value=\"", "c.drawString(LEFT_MARGIN, cursor, f\"Door Style : {STYLE}\") c.setFont(\"Helvetica\", 12) c.drawString( (PAGE_WIDTH", "self.height, ) self.string_center += 155 self.canv.drawCentredString( self.string_center, 0.25 * self.height,", "cursor, f'Cullman will not bore any door with 2\" stiles'", "otherwise noted.', ) c.restoreState() def myLaterPages(c, doc): cursor = PAGE_HEIGHT", "name=\"outside_profile\", # value=\" N/A \", # height=INPUT_HEIGHT, # width=(PAGE_WIDTH /", ") self.canv.rect(self.size_box_x, 0, self.size_box_width, self.height) self.string_center += (self.qty_box_width / 2)", "reportlab.rl_config import defaultPageSize from reportlab.lib.units import inch from 
reportlab.platypus.flowables import", "+ 5, \"Width X Height\") string_center += 155 c.drawCentredString(string_center, cursor", "PAGE_WIDTH = defaultPageSize[0] LEFT_MARGIN = 30 LINE_HEIGHT = 18 BACKGROUND_COLOR", "12) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Boring For Hinges :", "door_qty, door_size = door_list[i][\"qty\"], door_list[i][\"size\"] except IndexError: door_qty, door_size =", "No\") c.drawString( (PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor,", "self.string_center, 0.25 * self.height, self.dwr_qty, ) self.canv.rect( self.second_column_offset + self.size_box_x,", "name, door_list, drawer_list): doc = SimpleDocTemplate(f\"{path}/{name}-{STYLE}.pdf\") Story = [Spacer(1, 3.11", "- LEFT_MARGIN - (LEFT_MARGIN / 2) - 108, # borderWidth=0,", "0, self.qty_box_width, self.height) self.string_center += (self.idx_box_width / 2) + (self.qty_box_width", "name=\"Panel\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH / 2) - LEFT_MARGIN", "cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Outside Profile : {OUTSIDE_PROFILE}\") #", "2, 40, f\"Page {doc.page} of {job.name}-{STYLE}-{SPECIES}\" ) c.drawCentredString( PAGE_WIDTH /", "0) self.canv.rect(self.qty_box_x, 0, self.qty_box_width, self.height) self.string_center += (self.idx_box_width / 2)", "0.1) SPECIES = door_style.species STYLE = door_style.name INSIDE_PROFILE = door_style.inside_profile", "1, 1]), # relative=True, # ) c.line( (PAGE_WIDTH / 2)", "-= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Add Hinges : No\") c.drawString( (PAGE_WIDTH", "Profile : {INSIDE_PROFILE}\") # c.acroForm.textfield( # x=LEFT_MARGIN + 78, #", ") cursor -= 24 c.setStrokeColorRGB(0, 0, 0) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])", "self.canv.rect( self.second_column_offset + self.size_box_x, 0, self.size_box_width, self.height, ) self.string_center +=", ") cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Outside Profile : {OUTSIDE_PROFILE}\")", "= self.idx_box_width + xoffset self.qty_box_width = 60 self.size_box_x = self.qty_box_width", "defaultPageSize[1] PAGE_WIDTH = defaultPageSize[0] LEFT_MARGIN = 30 LINE_HEIGHT = 18", "- 4, # name=\"outside_profile\", # value=\" N/A \", # height=INPUT_HEIGHT,", "- 108, # borderWidth=0, # # fillColor=([1, 1, 1]), #", "= (33 / 255, 80 / 255, 156 / 255)", "20, fill=1) c.rect(LEFT_MARGIN + 308, cursor, 60, 20, fill=1) c.rect(LEFT_MARGIN", "+ 10, cursor, f\"Total Drawer Fronts: {TOTAL_DWRS}\" ) cursor -=", "reportlab.platypus import SimpleDocTemplate, Spacer from reportlab.rl_config import defaultPageSize from reportlab.lib.units", "= LEFT_MARGIN + 68 c.drawCentredString(string_center, cursor + 5, \"Qty\") string_center", "10, cursor, f\"Total Drawer Fronts: {TOTAL_DWRS}\" ) cursor -= 24", "/ 2) - 87, borderWidth=0, # fillColor=([1, 1, 1]), relative=True,", "+= 155 c.drawCentredString(string_center, cursor + 5, \"Qty\") string_center += 115", "class OrderEntry(Flowable): \"\"\"Draws table entry for each item in list", "10 c.drawString( LEFT_MARGIN, cursor, f\"and raised bead profile mitered doors\",", ") self.canv.drawCentredString( self.string_center, 0.25 * self.height, self.dwr_size ) def build_pdf(path,", "2) - 92, borderWidth=0, # fillColor=([1, 1, 1]), relative=True, )", "self.canv.setFillColorRGB( BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2] ) self.canv.rect(self.idx_box_x, 0, self.idx_box_width, self.height, fill=1)", "- (LEFT_MARGIN * 2), 24, fill=1 ) c.setFillColorRGB(1, 1, 1)", "c.drawString(LEFT_MARGIN, 
cursor, f\"Boring For Hinges : No\") c.drawString( (PAGE_WIDTH /", ") c.restoreState() def myLaterPages(c, doc): cursor = PAGE_HEIGHT - 54", "self.dwr_size = dwr_size self.index = index self.height = height self.idx_box_x", ") cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Inside Profile : {INSIDE_PROFILE}\")", ": \") c.acroForm.textfield( x=LEFT_MARGIN + 40, y=cursor - 4, name=\"Panel\",", "len(doors) TOTAL_DWRS = len(drawers) def myFirstPage(c, doc): cursor = CURSOR_HEIGHT", "-= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Inside Profile : {INSIDE_PROFILE}\") # c.acroForm.textfield(", "Profile : {OUTSIDE_PROFILE}\") # c.acroForm.textfield( # x=LEFT_MARGIN + 88, #", "cursor, f\"Stile/Rails : \") c.acroForm.textfield( x=LEFT_MARGIN + 62, y=cursor -", "BACKGROUND_COLOR[2] ) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.rect( LEFT_MARGIN, PAGE_HEIGHT - 40,", "LINE_HEIGHT c.setFont(\"Helvetica-Bold\", 12) c.drawString(LEFT_MARGIN, cursor, f\"Door Style : {STYLE}\") c.setFont(\"Helvetica\",", "- (LEFT_MARGIN / 2) - 108, # borderWidth=0, # #", "index=0, ): Flowable.__init__(self) self.dr_qty = dr_qty self.dr_size = dr_size self.dwr_qty", ": \" ) c.acroForm.textfield( x=LEFT_MARGIN + 370, y=cursor - 4,", "(33 / 255, 80 / 255, 156 / 255) CURSOR_HEIGHT", "cursor -= 12 c.setFont(\"Times-Italic\", 10) c.drawString( LEFT_MARGIN, cursor, f\"Boring not", "= defaultPageSize[0] LEFT_MARGIN = 30 LINE_HEIGHT = 18 BACKGROUND_COLOR =", "\") c.acroForm.textfield( x=LEFT_MARGIN + 62, y=cursor - 4, name=\"stiles_rails\", value=\"", "60, 20, fill=1) c.rect(LEFT_MARGIN + 98, cursor, 170, 20, fill=1)", "cursor, f\"Drawer Fronts : \") c.acroForm.textfield( x=LEFT_MARGIN + 375, y=cursor", "cursor, \"Comments : \") cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Wood", "2) + (LEFT_MARGIN / 2), cursor, f\"Outside Profile : \"", "= self.qty_box_width - 10 self.size_box_width = 170 self.second_column_offset = 270", "except IndexError: door_qty, door_size = \"\", \"\" try: drawer_qty, drawer_size", "f\"Stile/Rails : \") c.acroForm.textfield( x=LEFT_MARGIN + 62, y=cursor - 4,", "name=\"stiles_rails\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH / 2) - LEFT_MARGIN", "- (LEFT_MARGIN / 2) - 82, borderWidth=0, # fillColor=([1, 1,", "34, \"DOOR ORDER FORM\") c.setFont(\"Helvetica\", 12) c.setFillColorRGB(0, 0, 0) c.drawString(LEFT_MARGIN,", "fillColor=([1, 1, 1]), relative=True, ) cursor -= 12 c.setFont(\"Times-Italic\", 10)", "/ 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 108,", "cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Stile/Rails : \") c.acroForm.textfield( x=LEFT_MARGIN", "20, 'Reminder : Any doors 46\" and over in height", "self.dr_qty = dr_qty self.dr_size = dr_size self.dwr_qty = dwr_qty self.dwr_size", "CURSOR_HEIGHT = PAGE_HEIGHT - 60 INPUT_HEIGHT = LINE_HEIGHT - (LINE_HEIGHT", "PAGE_WIDTH - LEFT_MARGIN, cursor, ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor,", "string_center += 155 c.drawCentredString(string_center, cursor + 5, \"Qty\") string_center +=", "door sizes.\"\"\" def __init__( self, xoffset=0, height=20, dr_qty=\"\", dr_size=\"\", dwr_qty=\"\",", "cursor, f\"Panel : \") c.acroForm.textfield( x=LEFT_MARGIN + 40, y=cursor -", "- 34, \"DOOR ORDER FORM\") c.setFont(\"Helvetica\", 12) c.setFillColorRGB(0, 0, 0)", "bore any door with 2\" stiles' ) cursor -= 20", "c.drawCentredString(PAGE_WIDTH / 2.0, PAGE_HEIGHT - 34, \"DOOR ORDER FORM\") c.setFont(\"Helvetica\",", "BACKGROUND_COLOR[2] ) 
self.canv.rect(self.idx_box_x, 0, self.idx_box_width, self.height, fill=1) self.canv.setFillColorRGB(1, 1, 1)", "\"\", \"\" p = OrderEntry( xoffset=-50, dr_qty=door_qty, dr_size=door_size, dwr_qty=drawer_qty, dwr_size=drawer_size,", "c.drawCentredString(string_center, cursor + 5, \"Width X Height\") c.setFont(\"Helvetica\", 9) c.setFillColorRGB(0,", "12) c.drawString(LEFT_MARGIN, cursor, f\"Door Style : {STYLE}\") c.setFont(\"Helvetica\", 12) c.drawString(", "c.drawString( (PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, f\"Order", "# c.acroForm.textfield( # x=LEFT_MARGIN + 78, # y=cursor - 4,", "door_style.species STYLE = door_style.name INSIDE_PROFILE = door_style.inside_profile OUTSIDE_PROFILE = door_style.outside_profile", "cursor, \"Doors\") c.drawCentredString(((PAGE_WIDTH / 4) * 3) + 10, cursor,", "door with 2\" stiles' ) cursor -= 20 c.setFont(\"Helvetica-Bold\", 14)", "dwr_size self.index = index self.height = height self.idx_box_x = xoffset", "* self.height, self.dr_size ) # Drawer if self.dwr_qty != \"\"", "Drawer if self.dwr_qty != \"\" and self.dwr_size != \"\": self.canv.rect(", "value=\" N/A \", height=INPUT_HEIGHT, width=30, borderWidth=0, # fillColor=([1, 1, 1]),", "2) + (LEFT_MARGIN / 2), cursor, \"Phone : 901-853-7568\" )", "drawer_size = drawer_list[i][\"qty\"], drawer_list[i][\"size\"] except IndexError: drawer_qty, drawer_size = \"\",", "(LEFT_MARGIN / 2), cursor, \"Comments : \") cursor -= LINE_HEIGHT", "borderWidth=0, # fillColor=([1, 1, 1]), relative=True, ) cursor -= LINE_HEIGHT", "width=30, borderWidth=0, # fillColor=([1, 1, 1]), relative=True, ) cursor -=", "14) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.drawCentredString((PAGE_WIDTH / 4) + 30, cursor,", "from reportlab.platypus import SimpleDocTemplate, Spacer from reportlab.rl_config import defaultPageSize from", "c.setFont(\"Helvetica-Bold\", 12) c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor,", "-= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Panel : \") c.acroForm.textfield( x=LEFT_MARGIN +", "self.height, ) self.string_center += (self.qty_box_width / 2) + ( self.size_box_width", "name=\"5_pc_front\", value=\" N/A \", height=INPUT_HEIGHT, width=30, borderWidth=0, # fillColor=([1, 1,", "92, borderWidth=0, # fillColor=([1, 1, 1]), relative=True, ) c.setFont(\"Helvetica\", 12)", "c.saveState() c.setStrokeColorRGB( BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2] ) c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.rect(", "N/A \", # height=INPUT_HEIGHT, # width=(PAGE_WIDTH / 2) - LEFT_MARGIN", "170 self.second_column_offset = 270 def draw(self): # Door self.canv.setStrokeColorRGB(0, 0,", "LEFT_MARGIN, cursor, f\"Boring not available in arched doors, applied mould", "20, fill=1) c.rect(LEFT_MARGIN + 368, cursor, 170, 20, fill=1) c.setFont(\"Helvetica-Bold\",", "defaultPageSize[0] LEFT_MARGIN = 30 LINE_HEIGHT = 18 BACKGROUND_COLOR = (33", "JS Designs Shop, LLC\") c.drawString( (PAGE_WIDTH / 2) + (LEFT_MARGIN", "c.rect(LEFT_MARGIN + 98, cursor, 170, 20, fill=1) c.rect(LEFT_MARGIN + 308,", "c.setFont(\"Helvetica\", 9) c.setFillColorRGB(0, 0, 0) c.drawCentredString( PAGE_WIDTH / 2, 40,", "c.drawCentredString( PAGE_WIDTH / 2, 20, 'Reminder : Any doors 46\"", "LEFT_MARGIN - (LEFT_MARGIN / 2) - 82, borderWidth=0, # fillColor=([1,", "/ 2), cursor, \"Comments : \") cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN,", "c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.rect(LEFT_MARGIN + 38, 
cursor, 60, 20, fill=1)", "- 4, name=\"out_profile\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH / 2)", "self.string_center += (self.qty_box_width / 2) + ( self.size_box_width / 2", "+ 1, ) Story.append(p) doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages) build_pdf(path, job.name, doors,", "- 60, borderWidth=0, # fillColor=([1, 1, 1]), relative=True, ) c.drawString((PAGE_WIDTH", "1]), relative=True, ) c.setFont(\"Helvetica-Bold\", 12) c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN", "* inch)] num_of_doors = len(door_list) num_of_drawers = len(drawer_list) num_of_entries =", "dwr_size=drawer_size, index=i + 1, ) Story.append(p) doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages) build_pdf(path,", "fillColor=([1, 1, 1]), relative=True, ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor,", "reportlab.lib.units import inch from reportlab.platypus import SimpleDocTemplate, Spacer from reportlab.rl_config", "4, name=\"Panel\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH / 2) -", "- LEFT_MARGIN, cursor, ) cursor -= LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Outside", "c.drawString( LEFT_MARGIN, cursor, f\"Boring not available in arched doors, applied", "2), cursor, \"Delivery Date : ASAP\" ) cursor -= LINE_HEIGHT", "/ 2) + (LEFT_MARGIN / 2), cursor, \"Delivery Date :", "STYLE = door_style.name INSIDE_PROFILE = door_style.inside_profile OUTSIDE_PROFILE = door_style.outside_profile TOTAL_DRS", "name=\"drawer_fronts\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH / 2) - LEFT_MARGIN", "2, 40, f\"Page 1 of {job.name}-{STYLE}-{SPECIES}\" ) c.drawCentredString( PAGE_WIDTH /", "PAGE_WIDTH / 2, 20, 'Reminder : Any doors 46\" and", "self.height, str(self.index) ) self.canv.setFillColorRGB(0, 0, 0) self.canv.rect(self.qty_box_x, 0, self.qty_box_width, self.height)", "try: drawer_qty, drawer_size = drawer_list[i][\"qty\"], drawer_list[i][\"size\"] except IndexError: drawer_qty, drawer_size", "Date : ASAP\" ) cursor -= LINE_HEIGHT c.setFont(\"Helvetica-Bold\", 12) c.drawString(LEFT_MARGIN,", "2) + (LEFT_MARGIN / 2), cursor, \"Comments : \") cursor", "+ 38, cursor, 60, 20, fill=1) c.rect(LEFT_MARGIN + 98, cursor,", "cursor, \"Drawer Fronts\") cursor -= 24 c.setStrokeColorRGB(0, 0, 0) c.setFillColorRGB(BACKGROUND_COLOR[0],", "self.dwr_qty, ) self.canv.rect( self.second_column_offset + self.size_box_x, 0, self.size_box_width, self.height, )", "c.drawCentredString(((PAGE_WIDTH / 4) * 3) + 10, cursor, \"Drawer Fronts\")", "dwr_qty=drawer_qty, dwr_size=drawer_size, index=i + 1, ) Story.append(p) doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)", "self.size_box_x = self.qty_box_width - 10 self.size_box_width = 170 self.second_column_offset =", "= 170 self.second_column_offset = 270 def draw(self): # Door self.canv.setStrokeColorRGB(0,", "a horizontal center rail unless otherwise noted.', ) c.restoreState() class", "# y=cursor - 4, # name=\"outside_profile\", # value=\" N/A \",", "(self.qty_box_width / 2) + ( self.size_box_width / 2 ) self.canv.drawCentredString(", "{job.name}-{STYLE}-{SPECIES}\") c.drawString( (PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor,", "width=30, borderWidth=0, # fillColor=([1, 1, 1]), relative=True, ) c.acroForm.textfield( x=LEFT_MARGIN", "60 INPUT_HEIGHT = LINE_HEIGHT - (LINE_HEIGHT * 0.1) SPECIES =", "c.acroForm.textfield( # x=LEFT_MARGIN + 78, # y=cursor - 4, #", "1, 1]), relative=True, ) cursor -= 12 c.setFont(\"Times-Italic\", 10) c.drawString(", "\"Phone : 901-853-7568\" ) cursor -= 
LINE_HEIGHT c.drawString(LEFT_MARGIN, cursor, f\"Panel", "name=\"out_profile\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH / 2) - LEFT_MARGIN", "+ 430, y=cursor - 4, name=\"slab_front\", value=\" N/A \", height=INPUT_HEIGHT,", "Style : {STYLE}\") c.setFont(\"Helvetica\", 12) c.drawString( (PAGE_WIDTH / 2) +", "= dr_size self.dwr_qty = dwr_qty self.dwr_size = dwr_size self.index =", "2) + (self.size_box_width / 2) self.canv.drawCentredString( self.string_center, 0.25 * self.height,", "TOTAL_DWRS = len(drawers) def myFirstPage(c, doc): cursor = CURSOR_HEIGHT c.saveState()", "- 4, name=\"slab_front\", value=\" N/A \", height=INPUT_HEIGHT, width=30, borderWidth=0, #", "LEFT_MARGIN = 30 LINE_HEIGHT = 18 BACKGROUND_COLOR = (33 /", "# borderWidth=0, # # fillColor=([1, 1, 1]), # relative=True, #", "cursor, f\"Boring not available in arched doors, applied mould doors\",", "reportlab.platypus.flowables import Flowable def generate_order(job, path, door_style, doors=[], drawers=[]): PAGE_HEIGHT", "of {job.name}-{STYLE}-{SPECIES}\" ) c.drawCentredString( PAGE_WIDTH / 2, 20, 'Reminder :", "/ 2) + (LEFT_MARGIN / 2), cursor, f\"Outside Profile :", "BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]) c.drawCentredString((PAGE_WIDTH / 4) + 30, cursor, f\"Total Doors:", "Door self.canv.setStrokeColorRGB(0, 0, 0) self.canv.setFillColorRGB( BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2] ) self.canv.rect(self.idx_box_x,", "dr_qty=door_qty, dr_size=door_size, dwr_qty=drawer_qty, dwr_size=drawer_size, index=i + 1, ) Story.append(p) doc.build(Story,", "LINE_HEIGHT = 18 BACKGROUND_COLOR = (33 / 255, 80 /", "cursor, f\"Total Drawer Fronts: {TOTAL_DWRS}\" ) cursor -= 24 c.setStrokeColorRGB(0,", "/ 4) + 30, cursor, f\"Total Doors: {TOTAL_DRS}\") c.drawCentredString( ((PAGE_WIDTH", "self.height, self.dr_size ) # Drawer if self.dwr_qty != \"\" and", "borderWidth=0, # fillColor=([1, 1, 1]), relative=True, ) c.acroForm.textfield( x=LEFT_MARGIN +", "cursor, f\"Add Hinges : No\") c.drawString( (PAGE_WIDTH / 2) +", "BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2] ) self.canv.rect(self.idx_box_x, 0, self.idx_box_width, self.height, fill=1) self.canv.setFillColorRGB(1,", "2) + ( self.size_box_width / 2 ) self.canv.drawCentredString( self.string_center, 0.25", "# value=\" N/A \", # height=INPUT_HEIGHT, # width=(PAGE_WIDTH / 2)", "Doors: {TOTAL_DRS}\") c.drawCentredString( ((PAGE_WIDTH / 4) * 3) + 10,", "cursor, 170, 20, fill=1) c.rect(LEFT_MARGIN + 308, cursor, 60, 20,", "375, y=cursor - 4, name=\"drawer_fronts\", value=\" N/A \", height=INPUT_HEIGHT, width=(PAGE_WIDTH", "(LEFT_MARGIN / 2) - 92, borderWidth=0, # fillColor=([1, 1, 1]),", "/ 2.0, PAGE_HEIGHT - 34, \"DOOR ORDER FORM\") c.setFont(\"Helvetica\", 12)", "2) self.qty_box_x = self.idx_box_width + xoffset self.qty_box_width = 60 self.size_box_x", "y=cursor - 4, # name=\"outside_profile\", # value=\" N/A \", #", "(LEFT_MARGIN * 2), 24, fill=1 ) c.setFillColorRGB(1, 1, 1) c.setFont(\"Helvetica-Bold\",", "2) - 98, # borderWidth=0, # # fillColor=([1, 1, 1]),", "+ ( self.size_box_width / 2 ) self.canv.drawCentredString( self.string_center, 0.25 *", "c.setFont(\"Helvetica\", 12) c.drawString( (PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),", "(self.idx_box_width / 2) self.qty_box_x = self.idx_box_width + xoffset self.qty_box_width =", "draw(self): # Door self.canv.setStrokeColorRGB(0, 0, 0) self.canv.setFillColorRGB( BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]", "4) + 30, cursor, f\"Total Doors: {TOTAL_DRS}\") c.drawCentredString( 
        # myFirstPage (continued): the page-one header form fields, each drawn with
        # c.drawString and, where a fill-in is needed, a c.acroForm.textfield --
        # "Door Style : {STYLE}" / "Delivery Date : ASAP", "Panel :" (field "Panel") /
        # "Comments :", "Wood Type : {SPECIES}", "Stile/Rails :" (field "stiles_rails"),
        # "Inside Profile : {INSIDE_PROFILE}" and "Outside Profile : {OUTSIDE_PROFILE}"
        # (their "inside_profile" / "outside_profile" textfields are commented out),
        # "Outside Profile :" (field "out_profile"), "Drawer Fronts :" (field
        # "drawer_fronts"), " 5 PC Front: Slab:" (fields "5_pc_front" and "slab_front"),
        # "Boring For Hinges : No" and "Add Hinges : No"; blank comment rules on the
        # right half of the page are drawn with c.line(...). The function then ends
        # with the italic notes, the column headers and the page footer:
        cursor -= 12
        c.setFont("Times-Italic", 10)
        c.drawString(
            LEFT_MARGIN, cursor, f"not available in arched doors, applied mould doors"
        )
        cursor -= 10
        c.drawString(LEFT_MARGIN, cursor, f"and raised bead profile mitered doors")
        cursor -= 14
        c.setFont("Times-BoldItalic", 12)
        c.drawString(
            LEFT_MARGIN, cursor, f'Cullman will not bore any door with 2" stiles'
        )
        cursor -= 20
        c.setFont("Helvetica-Bold", 14)
        c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
        c.drawCentredString((PAGE_WIDTH / 4) + 30, cursor, "Doors")
        c.drawCentredString(((PAGE_WIDTH / 4) * 3) + 10, cursor, "Drawer Fronts")
        cursor -= 24
        c.setStrokeColorRGB(0, 0, 0)
        c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
        c.rect(LEFT_MARGIN + 38, cursor, 60, 20, fill=1)
        c.rect(LEFT_MARGIN + 98, cursor, 170, 20, fill=1)
        c.rect(LEFT_MARGIN + 308, cursor, 60, 20, fill=1)
        c.rect(LEFT_MARGIN + 368, cursor, 170, 20, fill=1)
        c.setFont("Helvetica-Bold", 12)
        c.setFillColorRGB(1, 1, 1)
        string_center = LEFT_MARGIN + 68
        c.drawCentredString(string_center, cursor + 5, "Qty")
        string_center += 115
        c.drawCentredString(string_center, cursor + 5, "Width X Height")
        string_center += 155
        c.drawCentredString(string_center, cursor + 5, "Qty")
        string_center += 115
        c.drawCentredString(string_center, cursor + 5, "Width X Height")
        c.setFont("Helvetica", 9)
        c.setFillColorRGB(0, 0, 0)
        c.drawCentredString(
            PAGE_WIDTH / 2, 40, f"Page 1 of {job.name}-{STYLE}-{SPECIES}"
        )
        c.drawCentredString(
            PAGE_WIDTH / 2,
            20,
            'Reminder : Any doors 46" and over in height will automatically receive a horizontal center rail unless otherwise noted.',
        )
        c.restoreState()

    def myLaterPages(c, doc):
        cursor = PAGE_HEIGHT - 54
        c.saveState()
        c.setFont("Helvetica-Bold", 14)
        c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
        c.drawCentredString((PAGE_WIDTH / 4) + 30, cursor, f"Total Doors: {TOTAL_DRS}")
        c.drawCentredString(
            ((PAGE_WIDTH / 4) * 3) + 10, cursor, f"Total Drawer Fronts: {TOTAL_DWRS}"
        )
        cursor -= 24
        c.setStrokeColorRGB(0, 0, 0)
        c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
        c.rect(LEFT_MARGIN + 38, cursor, 60, 20, fill=1)
        c.rect(LEFT_MARGIN + 98, cursor, 170, 20, fill=1)
        c.rect(LEFT_MARGIN + 308, cursor, 60, 20, fill=1)
        c.rect(LEFT_MARGIN + 368, cursor, 170, 20, fill=1)
        c.setFont("Helvetica-Bold", 12)
        c.setFillColorRGB(1, 1, 1)
        string_center = LEFT_MARGIN + 68
        c.drawCentredString(string_center, cursor + 5, "Qty")
        string_center += 115
        c.drawCentredString(string_center, cursor + 5, "Width X Height")
        string_center += 155
        c.drawCentredString(string_center, cursor + 5, "Qty")
        string_center += 115
        c.drawCentredString(string_center, cursor + 5, "Width X Height")
        c.setFont("Helvetica", 9)
        c.setFillColorRGB(0, 0, 0)
        c.drawCentredString(
            PAGE_WIDTH / 2, 40, f"Page {doc.page} of {job.name}-{STYLE}-{SPECIES}"
        )
        c.drawCentredString(
            PAGE_WIDTH / 2,
            20,
            'Reminder : Any doors 46" and over in height will automatically receive a horizontal center rail unless otherwise noted.',
        )
        c.restoreState()

    class OrderEntry(Flowable):
        """Draws table entry for each item in list of door sizes."""

        def __init__(
            self,
            xoffset=0,
            height=20,
            dr_qty="",
            dr_size="",
            dwr_qty="",
            dwr_size="",
            index=0,
        ):
            Flowable.__init__(self)
            self.dr_qty = dr_qty
            self.dr_size = dr_size
            self.dwr_qty = dwr_qty
            self.dwr_size = dwr_size
            self.index = index
            self.height = height
            self.idx_box_x = xoffset
            self.idx_box_width = 40
            self.string_center = xoffset + (self.idx_box_width / 2)
            self.qty_box_x = self.idx_box_width + xoffset
            self.qty_box_width = 60
            self.size_box_x = self.qty_box_width - 10
            self.size_box_width = 170
            self.second_column_offset = 270

        def draw(self):
            # Door
            self.canv.setStrokeColorRGB(0, 0, 0)
            self.canv.setFillColorRGB(
                BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]
            )
            self.canv.rect(self.idx_box_x, 0, self.idx_box_width, self.height, fill=1)
            self.canv.setFillColorRGB(1, 1, 1)
            self.canv.setFont("Helvetica", 12)
            self.canv.drawCentredString(
                self.string_center, 0.25 * self.height, str(self.index)
            )
            self.canv.setFillColorRGB(0, 0, 0)
            self.canv.rect(self.qty_box_x, 0, self.qty_box_width, self.height)
            self.string_center += (self.idx_box_width / 2) + (self.qty_box_width / 2)
            self.canv.drawCentredString(
                self.string_center, 0.25 * self.height, self.dr_qty
            )
            self.canv.rect(self.size_box_x, 0, self.size_box_width, self.height)
            self.string_center += (self.qty_box_width / 2) + (self.size_box_width / 2)
            self.canv.drawCentredString(
                self.string_center, 0.25 * self.height, self.dr_size
            )
            # Drawer
            if self.dwr_qty != "" and self.dwr_size != "":
                self.canv.rect(
                    self.second_column_offset + self.qty_box_x,
                    0,
                    self.qty_box_width,
                    self.height,
                )
                self.string_center += 155
                self.canv.drawCentredString(
                    self.string_center, 0.25 * self.height, self.dwr_qty,
                )
                self.canv.rect(
                    self.second_column_offset + self.size_box_x,
                    0,
                    self.size_box_width,
                    self.height,
                )
                self.string_center += (self.qty_box_width / 2) + (
                    self.size_box_width / 2
                )
                self.canv.drawCentredString(
                    self.string_center, 0.25 * self.height, self.dwr_size
                )

    def build_pdf(path, name, door_list, drawer_list):
        doc = SimpleDocTemplate(f"{path}/{name}-{STYLE}.pdf")
        Story = [Spacer(1, 3.11 * inch)]
        num_of_doors = len(door_list)
        num_of_drawers = len(drawer_list)
        num_of_entries = max(num_of_doors, num_of_drawers)
        for i in range(0, num_of_entries):
            try:
                door_qty, door_size = door_list[i]["qty"], door_list[i]["size"]
            except IndexError:
                door_qty, door_size = "", ""
            try:
                drawer_qty, drawer_size = drawer_list[i]["qty"], drawer_list[i]["size"]
            except IndexError:
                drawer_qty, drawer_size = "", ""
            p = OrderEntry(
                xoffset=-50,
                dr_qty=door_qty,
                dr_size=door_size,
                dwr_qty=drawer_qty,
                dwr_size=drawer_size,
                index=i + 1,
            )
            Story.append(p)
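        # --- Sketch only (not recovered source): the fragments stop at
        # Story.append(p), so nothing above shows how the document is emitted or
        # how generate_order finally calls build_pdf. With ReportLab's
        # SimpleDocTemplate the usual finishing step is doc.build() wired to the
        # two page callbacks defined above; this line and the sample sizes, path
        # and name below are assumptions.
        doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)

    # hypothetical invocation, e.g. from the end of generate_order:
    build_pdf(
        path,
        job.name,
        door_list=[{"qty": "2", "size": '14 3/4" X 24 1/2"'}],
        drawer_list=[{"qty": "4", "size": '14 3/4" X 6"'}],
    )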
[ "if __name__ == \"__main__\": bq_service = BigQueryService() bq_service.migrate_daily_bot_probabilities_table() print(\"MIGRATION SUCCESSFUL!\")", "BigQueryService if __name__ == \"__main__\": bq_service = BigQueryService() bq_service.migrate_daily_bot_probabilities_table() print(\"MIGRATION", "from app.bq_service import BigQueryService if __name__ == \"__main__\": bq_service =", "<reponame>s2t2/tweet-analyzer-py from app.bq_service import BigQueryService if __name__ == \"__main__\": bq_service", "app.bq_service import BigQueryService if __name__ == \"__main__\": bq_service = BigQueryService()", "import BigQueryService if __name__ == \"__main__\": bq_service = BigQueryService() bq_service.migrate_daily_bot_probabilities_table()" ]
[ "self.other_distribution ) def _check_input_parameters(self): super(MiniGridDoorKeyMDP, self)._check_input_parameters() assert self.size >= 3", "== MiniGridDoorKeyDirection.UP: grid[self.cur_node.Y, self.cur_node.X] = \"^\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.RIGHT:", "DropObject = 4 UseObject = 5 class MiniGridDoorKeyDirection(IntEnum): \"\"\"The possible", "for sampling rewards and next states. randomize_actions : bool, optional", "> self.wall_position if self.is_goal_before else i < self.wall_position ): starting_positions.append((j,", "enum import IntEnum from colosseum.utils.random_vars import deterministic, get_dist try: from", "if self.is_goal_before else i < self.wall_position ): starting_positions.append((j, i) if", "colosseum.utils.mdps import check_distributions class MiniGridDoorKeyAction(IntEnum): \"\"\"The action available in the", "% 4 if action == MiniGridDoorKeyAction.MoveForward: if node.Dir == MiniGridDoorKeyDirection.UP:", "parameters or as a rv_continuous object. \"\"\" if type(optimal_distribution) ==", "= (node.X, node.Y + 1) if node.Dir == MiniGridDoorKeyDirection.RIGHT: next_coord", "to two. optimal_distribution : Union[Tuple, rv_continuous], optional The distribution of", "make_reward_stochastic dists = [ optimal_distribution, other_distribution, ] if dists.count(None) ==", "return { **super(MiniGridDoorKeyMDP, self).parameters, **dict( size=self.size, n_starting_states=self.n_starting_states, optimal_distribution=self.optimal_distribution, other_distribution=self.other_distribution, ),", "( MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0)) .ravel() .tolist() ) for i in", "whether the rewards are to be made stochastic. By default,", "coords.ravel(): if ( i < self.wall_position if self.is_goal_before else i", "i < self.wall_position ): starting_positions.append((j, i) if self.is_wall_horizontal else (i,", "-> NextStateSampler: # noinspection PyAttributeOutsideInit self.wall_position = self._rng.randint(self.size - 2)", "optimal_distribution[1:] ) if type(other_distribution) == tuple: other_distribution = get_dist(other_distribution[0], other_distribution[1:])", "MiniGridDoorKeyDirection.UP: next_coord = (node.X, node.Y + 1) if node.Dir ==", "(3, 5, 7) t_params[\"make_reward_stochastic\"] = (True, False) t_params[\"n_starting_states\"] = (1,", "type(optimal_distribution) == tuple: optimal_distribution = get_dist( optimal_distribution[0], optimal_distribution[1:] ) if", "0.5 if self.is_wall_horizontal: self.door_position = self._rng.randint(self.size), self.wall_position else: self.door_position =", "def possible_starting_nodes(self) -> List[MiniGridDoorKeyNode]: return self._possible_starting_nodes @cached_property def coordinates_available(self): coords", "NextStateSampler: # noinspection PyAttributeOutsideInit self.wall_position = self._rng.randint(self.size - 2) +", "of the highly rewarding state. 
It can be either passed", "= -1 if node.XKey == -1 and not node.IsDoorOpened: if", "the MiniGridDoorKey MDP.\"\"\" UP = 0 RIGHT = 1 DOWN", "passed as a tuple containing Beta parameters or as a", "def num_actions(self): return len(MiniGridDoorKeyAction) def _calculate_next_nodes_prms( self, node: MiniGridDoorKeyNode, action:", "Union[Tuple, rv_continuous] The distribution of the non highly rewarding states.", "False, ) for x, y, dir in starting_positions ] return", "-> List[MiniGridDoorKeyNode]: return self._possible_starting_nodes @cached_property def coordinates_available(self): coords = (", "else self.other_distribution ) def _check_input_parameters(self): super(MiniGridDoorKeyMDP, self)._check_input_parameters() assert self.size >=", "- 1) % 4 if action == MiniGridDoorKeyAction.MoveForward: if node.Dir", "\"C\" ) if self.cur_node.Dir == MiniGridDoorKeyDirection.UP: grid[self.cur_node.Y, self.cur_node.X] = \"^\"", ": int the seed used for sampling rewards and next", "By default, it is set to False. n_starting_states : int,", "highly rewarding state. It can be either passed as a", "if not is_wall_horizontal: grid[i, wall_position] = \"W_en\" else: grid[wall_position, i]", "tuple: other_distribution = get_dist(other_distribution[0], other_distribution[1:]) self.n_starting_states = n_starting_states self.size =", "the probability of an action not producing any effect on", "self.goal_position[0]] = \"G\" if self.cur_node.XKey != -1: grid[self.cur_node.YKey, self.cur_node.XKey] =", "self.cur_node.Dir == MiniGridDoorKeyDirection.LEFT: grid[self.cur_node.Y, self.cur_node.X] = \"<\" return grid[::-1, :]", "2 PickObject = 3 DropObject = 4 UseObject = 5", "super(MiniGridDoorKeyMDP, self)._check_input_parameters() assert self.size >= 3 check_distributions( [ self.optimal_distribution, self.other_distribution,", "Type, Union import numpy as np from scipy.stats import beta,", "a rv_continuous object. \"\"\" if type(optimal_distribution) == tuple: optimal_distribution =", "from copy import deepcopy from dataclasses import asdict, dataclass from", "== tuple: other_distribution = get_dist(other_distribution[0], other_distribution[1:]) self.n_starting_states = n_starting_states self.size", "IntEnum from colosseum.utils.random_vars import deterministic, get_dist try: from functools import", "the rewards are to be made stochastic. By default, it", "MiniGridDoorKeyDirection.DOWN: grid[self.cur_node.Y, self.cur_node.X] = \"v\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.LEFT: grid[self.cur_node.Y,", "node.X - 1, node.Y if next_coord == self.door_position: newnode_prms[\"IsDoorOpened\"] =", "experiments to avoid immediately reaching highly rewarding states in some", "the non highly rewarding states. It can be either passed", "1, 1) else: self.optimal_distribution = deterministic(1.0) self.other_distribution = deterministic(0.0) super().__init__(", "n_starting_states : int, optional the number of states in the", "len(MiniGridDoorKeyAction) def _calculate_next_nodes_prms( self, node: MiniGridDoorKeyNode, action: int ) ->", "bool = True, lazy: float = None, make_reward_stochastic=False, n_starting_states: int", "it is set to true. lazy : float the probability", "**kwargs, ): \"\"\" Parameters ---------- seed : int the seed", "import cached_property from typing import Any, Dict, List, Tuple, Type,", "optional the number of states in the starting distribution. 
By", "import numpy as np from scipy.stats import beta, rv_continuous from", "= self.door_position wall_position = self.wall_position is_wall_horizontal = self.is_wall_horizontal grid =", "action == MiniGridDoorKeyAction.UseObject: if node.Dir == MiniGridDoorKeyDirection.UP: next_coord = (node.X,", "== node.XKey and node.Y == node.YKey: newnode_prms[\"XKey\"] = newnode_prms[\"YKey\"] =", "node.X - 1, node.Y if next_coord in self.coordinates_available or (", "= node.Y if action == MiniGridDoorKeyAction.UseObject: if node.Dir == MiniGridDoorKeyDirection.UP:", "1, node.Y if next_coord in self.coordinates_available or ( node.IsDoorOpened and", "try: from functools import cached_property except: from backports.cached_property import cached_property", "if action == MiniGridDoorKeyAction.UseObject: if node.Dir == MiniGridDoorKeyDirection.UP: next_coord =", "-> Dict[str, Tuple]: t_params = MDP.testing_parameters() t_params[\"size\"] = (3, 5,", "a tuple containing Beta parameters or as a rv_continuous object.", "numpy as np from scipy.stats import beta, rv_continuous from colosseum.mdps", "optimal_distribution: Union[Tuple, rv_continuous] = None, other_distribution: Union[Tuple, rv_continuous] = None,", "value to true when doing experiments to avoid immediately reaching", "can be either passed as a tuple containing Beta parameters", "if self.is_wall_horizontal: coords.remove((i, self.wall_position)) else: coords.remove((self.wall_position, i)) return tuple(coords) @property", "= node.X newnode_prms[\"YKey\"] = node.Y if action == MiniGridDoorKeyAction.UseObject: if", "grid[wall_position, i] = \"W_en\" grid[door_position[1], door_position[0]] = ( \"O\" if", ": Union[Tuple, rv_continuous] The distribution of the non highly rewarding", "def _calculate_reward_distribution( self, node: Any, action: IntEnum, next_node: Any )", "other_distribution: Union[Tuple, rv_continuous] = None, **kwargs, ): \"\"\" Parameters ----------", "( \"O\" if self.cur_node.IsDoorOpened else \"C\" ) if self.cur_node.Dir ==", "backports.cached_property import cached_property from typing import Any, Dict, List, Tuple,", "states. It can be either passed as a tuple containing", "= newnode_prms[\"YKey\"] = -1 if node.XKey == -1 and not", ".tolist() ) for i in range(self.size): if self.is_wall_horizontal: coords.remove((i, self.wall_position))", "self.wall_position is_wall_horizontal = self.is_wall_horizontal grid = np.zeros((grid_size, grid_size), dtype=str) grid[:,", "self.is_goal_before else i > self.wall_position ): goal_positions.append((j, i) if self.is_wall_horizontal", "@dataclass(frozen=True) class MiniGridDoorKeyNode: X: int Y: int Dir: MiniGridDoorKeyDirection XKey:", "non highly rewarding states. It can be either passed as", "highly rewarding states. It can be either passed as a", "self.wall_position)) else: coords.remove((self.wall_position, i)) return tuple(coords) @property def num_actions(self): return", "False) t_params[\"n_starting_states\"] = (1, 4) return t_params @staticmethod def get_node_class()", "int the seed used for sampling rewards and next states.", "state. 
It can be either passed as a tuple containing", "parameters(self) -> Dict[str, Any]: return { **super(MiniGridDoorKeyMDP, self).parameters, **dict( size=self.size,", "self, node: MiniGridDoorKeyNode, action: int ) -> Tuple[Tuple[dict, float], ...]:", "grid[self.cur_node.Y, self.cur_node.X] = \"^\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.RIGHT: grid[self.cur_node.Y, self.cur_node.X]", "= beta(size ** 2 - 1, 1) else: self.optimal_distribution =", "the MiniGridDoorKey MDP.\"\"\" MoveForward = 0 TurnRight = 1 TurnLeft", "the seed used for sampling rewards and next states. randomize_actions", "node.Dir == MiniGridDoorKeyDirection.DOWN: next_coord = node.X, node.Y - 1 if", "float the probability of an action not producing any effect", "Tuple, Type, Union import numpy as np from scipy.stats import", "avoid immediately reaching highly rewarding states in some MDPs by", ".ravel() .tolist() ) for i in range(self.size): if self.is_wall_horizontal: coords.remove((i,", "starting_positions ] return NextStateSampler( next_states=self._possible_starting_nodes[: self.n_starting_states], probs=[1 / self.n_starting_states for", "if action == MiniGridDoorKeyAction.TurnRight: newnode_prms[\"Dir\"] = (node.Dir + 1) %", "x, y in starting_positions for dir in MiniGridDoorKeyDirection ] assert", "coordinates_available(self): coords = ( MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0)) .ravel() .tolist() )", "node.Y if next_coord == self.door_position: newnode_prms[\"IsDoorOpened\"] = True return ((newnode_prms,", "noinspection PyAttributeOutsideInit self.wall_position = self._rng.randint(self.size - 2) + 1 #", "next_states=self._possible_starting_nodes[: self.n_starting_states], probs=[1 / self.n_starting_states for _ in range(self.n_starting_states)], seed=self._next_seed(),", "def _check_input_parameters(self): super(MiniGridDoorKeyMDP, self)._check_input_parameters() assert self.size >= 3 check_distributions( [", "= deterministic(1.0) self.other_distribution = deterministic(0.0) super().__init__( seed=seed, randomize_actions=randomize_actions, lazy=lazy, **kwargs,", "grid[door_position[1], door_position[0]] = ( \"O\" if self.cur_node.IsDoorOpened else \"C\" )", "be either passed as a tuple containing Beta parameters or", "super().__init__( seed=seed, randomize_actions=randomize_actions, lazy=lazy, **kwargs, ) @property def parameters(self) ->", "self.wall_position if self.is_goal_before else i > self.wall_position ): goal_positions.append((j, i)", "randomize_actions : bool, optional whether the effect of the actions", "= self.size door_position = self.door_position wall_position = self.wall_position is_wall_horizontal =", "(node.Dir + 1) % 4 if action == MiniGridDoorKeyAction.TurnLeft: newnode_prms[\"Dir\"]", "repeatedly. By default, it is set to true. lazy :", "newnode_prms[\"IsDoorOpened\"] = True return ((newnode_prms, 1.0),) def _calculate_reward_distribution( self, node:", "object. 
\"\"\" if type(optimal_distribution) == tuple: optimal_distribution = get_dist( optimal_distribution[0],", "MiniGridDoorKeyDirection.UP: grid[self.cur_node.Y, self.cur_node.X] = \"^\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.RIGHT: grid[self.cur_node.Y,", "class MiniGridDoorKeyDirection(IntEnum): \"\"\"The possible agent direction in the MiniGridDoorKey MDP.\"\"\"", "node.Y if node.Dir == MiniGridDoorKeyDirection.DOWN: next_coord = node.X, node.Y -", "node.XKey == -1 and not node.IsDoorOpened: if action == MiniGridDoorKeyAction.DropObject:", "self.wall_position else: self.door_position = self.wall_position, self._rng.randint(self.size) self.is_goal_before = self._rng.rand() >", "LEFT = 3 @dataclass(frozen=True) class MiniGridDoorKeyNode: X: int Y: int", "distribution of the non highly rewarding states. It can be", "if dists.count(None) == 0: self.optimal_distribution = optimal_distribution self.other_distribution = other_distribution", "self._rng.shuffle(starting_positions) self.start_key_position = starting_positions.pop(0) starting_positions = [ (x, y, dir)", "get_dist(other_distribution[0], other_distribution[1:]) self.n_starting_states = n_starting_states self.size = size self.make_reward_stochastic =", "tuple containing Beta parameters or as a rv_continuous object. \"\"\"", "Dict[str, Tuple]: t_params = MDP.testing_parameters() t_params[\"size\"] = (3, 5, 7)", "import IntEnum from colosseum.utils.random_vars import deterministic, get_dist try: from functools", "YKey: int IsDoorOpened: bool def __str__(self): return f\"X={self.X},Y={self.Y},Dir={MiniGridDoorKeyDirection(self.Dir).name},XKey={self.XKey},YKey={self.YKey},IsDoorOpened{self.IsDoorOpened}\" class MiniGridDoorKeyMDP(MDP):", "the number of states in the starting distribution. By default,", "newnode_prms[\"XKey\"] = newnode_prms[\"YKey\"] = -1 if node.XKey == -1 and", "the same action repeatedly. By default, it is set to", "int the size of the grid. make_reward_stochastic : bool, optional", "in starting_positions for dir in MiniGridDoorKeyDirection ] assert self.n_starting_states <", "= np.zeros((grid_size, grid_size), dtype=str) grid[:, :] = \" \" grid[self.goal_position[1],", "4 if action == MiniGridDoorKeyAction.TurnLeft: newnode_prms[\"Dir\"] = (node.Dir - 1)", "action repeatedly. By default, it is set to true. lazy", "else: self.door_position = self.wall_position, self._rng.randint(self.size) self.is_goal_before = self._rng.rand() > 0.5", "action == MiniGridDoorKeyAction.TurnLeft: newnode_prms[\"Dir\"] = (node.Dir - 1) % 4", "rv_continuous], optional The distribution of the highly rewarding state. 
It", ") def calc_grid_repr(self, node: Any) -> np.array: grid_size = self.size", "action == MiniGridDoorKeyAction.MoveForward: if node.Dir == MiniGridDoorKeyDirection.UP: next_coord = (node.X,", "Any, Dict, List, Tuple, Type, Union import numpy as np", ") -> rv_continuous: return ( self.optimal_distribution if next_node.X == self.goal_position[0]", "elif self.cur_node.Dir == MiniGridDoorKeyDirection.RIGHT: grid[self.cur_node.Y, self.cur_node.X] = \">\" elif self.cur_node.Dir", "goal_positions[0] self._rng.shuffle(starting_positions) self.start_key_position = starting_positions.pop(0) starting_positions = [ (x, y,", "i)) return tuple(coords) @property def num_actions(self): return len(MiniGridDoorKeyAction) def _calculate_next_nodes_prms(", "MiniGridDoorKeyDirection.DOWN: next_coord = node.X, node.Y - 1 if node.Dir ==", "-> Tuple[Tuple[dict, float], ...]: newnode_prms = deepcopy(asdict(node)) if action ==", "( node.IsDoorOpened and next_coord == self.door_position ): newnode_prms[\"X\"], newnode_prms[\"Y\"] =", "None, make_reward_stochastic=False, n_starting_states: int = 2, optimal_distribution: Union[Tuple, rv_continuous] =", "NextStateSampler( next_states=self._possible_starting_nodes[: self.n_starting_states], probs=[1 / self.n_starting_states for _ in range(self.n_starting_states)],", "self)._check_input_parameters() assert self.size >= 3 check_distributions( [ self.optimal_distribution, self.other_distribution, ],", "= deterministic(0.0) super().__init__( seed=seed, randomize_actions=randomize_actions, lazy=lazy, **kwargs, ) @property def", "return self._possible_starting_nodes @cached_property def coordinates_available(self): coords = ( MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0,", "**dict( size=self.size, n_starting_states=self.n_starting_states, optimal_distribution=self.optimal_distribution, other_distribution=self.other_distribution, ), } @property def possible_starting_nodes(self)", "grid[self.cur_node.Y, self.cur_node.X] = \">\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.DOWN: grid[self.cur_node.Y, self.cur_node.X]", ": bool, optional checks whether the rewards are to be", "self._rng.rand() > 0.5 coords = MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0)) goal_positions =", "set this value to true when doing experiments to avoid", "+ 1) if node.Dir == MiniGridDoorKeyDirection.RIGHT: next_coord = node.X +", "= self.wall_position is_wall_horizontal = self.is_wall_horizontal grid = np.zeros((grid_size, grid_size), dtype=str)", "MiniGridDoorKeyDirection.RIGHT: grid[self.cur_node.Y, self.cur_node.X] = \">\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.DOWN: grid[self.cur_node.Y,", "def testing_parameters() -> Dict[str, Tuple]: t_params = MDP.testing_parameters() t_params[\"size\"] =", "5, 7) t_params[\"make_reward_stochastic\"] = (True, False) t_params[\"n_starting_states\"] = (1, 4)", "bool, optional whether the effect of the actions changes for", "next_coord in self.coordinates_available or ( node.IsDoorOpened and next_coord == self.door_position", "rewards are to be made stochastic. 
By default, it is", "possible_starting_positions = deepcopy(starting_positions) self._rng.shuffle(goal_positions) self.goal_position = goal_positions[0] self._rng.shuffle(starting_positions) self.start_key_position =", "self.optimal_distribution, self.other_distribution, ], self.make_reward_stochastic, ) def _instantiate_starting_node_sampler(self) -> NextStateSampler: #", "door_position[0]] = ( \"O\" if self.cur_node.IsDoorOpened else \"C\" ) if", "0.5 coords = MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0)) goal_positions = [] starting_positions", "two. optimal_distribution : Union[Tuple, rv_continuous], optional The distribution of the", "from dataclasses import asdict, dataclass from enum import IntEnum from", "-1 if node.XKey == -1 and not node.IsDoorOpened: if action", "effect on the MDP. size : int the size of", "1 TurnLeft = 2 PickObject = 3 DropObject = 4", "\"G\" if self.cur_node.XKey != -1: grid[self.cur_node.YKey, self.cur_node.XKey] = \"K\" for", "rv_continuous object. other_distribution : Union[Tuple, rv_continuous] The distribution of the", "---------- seed : int the seed used for sampling rewards", "rewarding states in some MDPs by just selecting the same", "from enum import IntEnum from colosseum.utils.random_vars import deterministic, get_dist try:", "i > self.wall_position if self.is_goal_before else i < self.wall_position ):", "int, optional the number of states in the starting distribution.", "= self.is_wall_horizontal grid = np.zeros((grid_size, grid_size), dtype=str) grid[:, :] =", "self.other_distribution = other_distribution else: if make_reward_stochastic: self.other_distribution = beta(1, size", "starting_positions.append((j, i) if self.is_wall_horizontal else (i, j)) possible_starting_positions = deepcopy(starting_positions)", "MiniGridDoorKeyAction.TurnRight: newnode_prms[\"Dir\"] = (node.Dir + 1) % 4 if action", "node.X, node.Y - 1 if node.Dir == MiniGridDoorKeyDirection.LEFT: next_coord =", "of the actions changes for every node. It is particularly", "} @property def possible_starting_nodes(self) -> List[MiniGridDoorKeyNode]: return self._possible_starting_nodes @cached_property def", "changes for every node. It is particularly important to set", "when doing experiments to avoid immediately reaching highly rewarding states", "optional The distribution of the highly rewarding state. It can", "rv_continuous: return ( self.optimal_distribution if next_node.X == self.goal_position[0] and next_node.Y", "(i, j)) elif ( i > self.wall_position if self.is_goal_before else", "self.cur_node.Dir == MiniGridDoorKeyDirection.DOWN: grid[self.cur_node.Y, self.cur_node.X] = \"v\" elif self.cur_node.Dir ==", "1) % 4 if action == MiniGridDoorKeyAction.TurnLeft: newnode_prms[\"Dir\"] = (node.Dir", "It can be either passed as a tuple containing Beta", "int, size: int, randomize_actions: bool = True, lazy: float =", "if self.is_wall_horizontal: self.door_position = self._rng.randint(self.size), self.wall_position else: self.door_position = self.wall_position,", "is_wall_horizontal = self.is_wall_horizontal grid = np.zeros((grid_size, grid_size), dtype=str) grid[:, :]", "coords = ( MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0)) .ravel() .tolist() ) for", "stochastic. By default, it is set to False. 
n_starting_states :", ">= 3 check_distributions( [ self.optimal_distribution, self.other_distribution, ], self.make_reward_stochastic, ) def", "{ **super(MiniGridDoorKeyMDP, self).parameters, **dict( size=self.size, n_starting_states=self.n_starting_states, optimal_distribution=self.optimal_distribution, other_distribution=self.other_distribution, ), }", "be made stochastic. By default, it is set to False.", "newnode_prms[\"XKey\"] = node.X newnode_prms[\"YKey\"] = node.Y if action == MiniGridDoorKeyAction.UseObject:", "self.n_starting_states < len(starting_positions) self._possible_starting_nodes = [ MiniGridDoorKeyNode( x, y, dir.value,", "= \"W_en\" else: grid[wall_position, i] = \"W_en\" grid[door_position[1], door_position[0]] =", "== tuple: optimal_distribution = get_dist( optimal_distribution[0], optimal_distribution[1:] ) if type(other_distribution)", "** 2 - 1, 1) else: self.optimal_distribution = deterministic(1.0) self.other_distribution", "action not producing any effect on the MDP. size :", "> 0.5 if self.is_wall_horizontal: self.door_position = self._rng.randint(self.size), self.wall_position else: self.door_position", "else: coords.remove((self.wall_position, i)) return tuple(coords) @property def num_actions(self): return len(MiniGridDoorKeyAction)", "= node.X + 1, node.Y if node.Dir == MiniGridDoorKeyDirection.DOWN: next_coord", "y in starting_positions for dir in MiniGridDoorKeyDirection ] assert self.n_starting_states", "= \"G\" if self.cur_node.XKey != -1: grid[self.cur_node.YKey, self.cur_node.XKey] = \"K\"", "assert self.size >= 3 check_distributions( [ self.optimal_distribution, self.other_distribution, ], self.make_reward_stochastic,", "1, node.Y if node.Dir == MiniGridDoorKeyDirection.DOWN: next_coord = node.X, node.Y", "= starting_positions.pop(0) starting_positions = [ (x, y, dir) for x,", "grid[:, :] = \" \" grid[self.goal_position[1], self.goal_position[0]] = \"G\" if", "of an action not producing any effect on the MDP.", "newnode_prms[\"Dir\"] = (node.Dir + 1) % 4 if action ==", "self.size = size self.make_reward_stochastic = make_reward_stochastic dists = [ optimal_distribution,", "Dict[str, Any]: return { **super(MiniGridDoorKeyMDP, self).parameters, **dict( size=self.size, n_starting_states=self.n_starting_states, optimal_distribution=self.optimal_distribution,", "states in some MDPs by just selecting the same action", "num_actions(self): return len(MiniGridDoorKeyAction) def _calculate_next_nodes_prms( self, node: MiniGridDoorKeyNode, action: int", "\"O\" if self.cur_node.IsDoorOpened else \"C\" ) if self.cur_node.Dir == MiniGridDoorKeyDirection.UP:", "scipy.stats import beta, rv_continuous from colosseum.mdps import MDP from colosseum.mdps.base_mdp", "doing experiments to avoid immediately reaching highly rewarding states in", "np.array: grid_size = self.size door_position = self.door_position wall_position = self.wall_position", "[ self.optimal_distribution, self.other_distribution, ], self.make_reward_stochastic, ) def _instantiate_starting_node_sampler(self) -> NextStateSampler:", "grid[self.cur_node.Y, self.cur_node.X] = \"v\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.LEFT: grid[self.cur_node.Y, self.cur_node.X]", "optimal_distribution = get_dist( optimal_distribution[0], optimal_distribution[1:] ) if type(other_distribution) == tuple:", "and next_node.Y == self.goal_position[1] else self.other_distribution ) def _check_input_parameters(self): super(MiniGridDoorKeyMDP,", "self._rng.rand() > 0.5 if self.is_wall_horizontal: self.door_position = 
self._rng.randint(self.size), self.wall_position else:", "from backports.cached_property import cached_property from typing import Any, Dict, List,", "checks whether the rewards are to be made stochastic. By", "y, dir in starting_positions ] return NextStateSampler( next_states=self._possible_starting_nodes[: self.n_starting_states], probs=[1", "sampling rewards and next states. randomize_actions : bool, optional whether", "self.door_position = self._rng.randint(self.size), self.wall_position else: self.door_position = self.wall_position, self._rng.randint(self.size) self.is_goal_before", "optimal_distribution[0], optimal_distribution[1:] ) if type(other_distribution) == tuple: other_distribution = get_dist(other_distribution[0],", "not producing any effect on the MDP. size : int", "and not node.IsDoorOpened: if action == MiniGridDoorKeyAction.DropObject: newnode_prms[\"XKey\"] = node.X", "newnode_prms[\"X\"], newnode_prms[\"Y\"] = next_coord if action == MiniGridDoorKeyAction.PickObject: if node.X", "np.zeros((grid_size, grid_size), dtype=str) grid[:, :] = \" \" grid[self.goal_position[1], self.goal_position[0]]", "probs=[1 / self.n_starting_states for _ in range(self.n_starting_states)], seed=self._next_seed(), ) def", "parameters or as a rv_continuous object. other_distribution : Union[Tuple, rv_continuous]", "None, other_distribution: Union[Tuple, rv_continuous] = None, **kwargs, ): \"\"\" Parameters", "the effect of the actions changes for every node. It", "is particularly important to set this value to true when", "i in range(self.size): if self.is_wall_horizontal: coords.remove((i, self.wall_position)) else: coords.remove((self.wall_position, i))", "possible agent direction in the MiniGridDoorKey MDP.\"\"\" UP = 0", "grid. make_reward_stochastic : bool, optional checks whether the rewards are", "__init__( self, seed: int, size: int, randomize_actions: bool = True,", "other_distribution else: if make_reward_stochastic: self.other_distribution = beta(1, size ** 2", "i in range(grid_size): if not is_wall_horizontal: grid[i, wall_position] = \"W_en\"", "self.is_wall_horizontal else (i, j)) elif ( i > self.wall_position if", "\"W_en\" grid[door_position[1], door_position[0]] = ( \"O\" if self.cur_node.IsDoorOpened else \"C\"", "np from scipy.stats import beta, rv_continuous from colosseum.mdps import MDP", "node.Y + 1) if node.Dir == MiniGridDoorKeyDirection.RIGHT: next_coord = node.X", "def calc_grid_repr(self, node: Any) -> np.array: grid_size = self.size door_position", "rewarding states. It can be either passed as a tuple", "MDP.testing_parameters() t_params[\"size\"] = (3, 5, 7) t_params[\"make_reward_stochastic\"] = (True, False)", "as np from scipy.stats import beta, rv_continuous from colosseum.mdps import", "optimal_distribution self.other_distribution = other_distribution else: if make_reward_stochastic: self.other_distribution = beta(1,", "size : int the size of the grid. 
make_reward_stochastic :", "node.Dir == MiniGridDoorKeyDirection.LEFT: next_coord = node.X - 1, node.Y if", "j in coords.ravel(): if ( i < self.wall_position if self.is_goal_before", "-1 and not node.IsDoorOpened: if action == MiniGridDoorKeyAction.DropObject: newnode_prms[\"XKey\"] =", "else i < self.wall_position ): starting_positions.append((j, i) if self.is_wall_horizontal else", "if action == MiniGridDoorKeyAction.DropObject: newnode_prms[\"XKey\"] = node.X newnode_prms[\"YKey\"] = node.Y", "dir.value, *self.start_key_position, False, ) for x, y, dir in starting_positions", "None, **kwargs, ): \"\"\" Parameters ---------- seed : int the", "= None, other_distribution: Union[Tuple, rv_continuous] = None, **kwargs, ): \"\"\"", "randomize_actions: bool = True, lazy: float = None, make_reward_stochastic=False, n_starting_states:", "\"W_en\" else: grid[wall_position, i] = \"W_en\" grid[door_position[1], door_position[0]] = (", "return ( self.optimal_distribution if next_node.X == self.goal_position[0] and next_node.Y ==", "grid[self.cur_node.YKey, self.cur_node.XKey] = \"K\" for i in range(grid_size): if not", "== MiniGridDoorKeyAction.DropObject: newnode_prms[\"XKey\"] = node.X newnode_prms[\"YKey\"] = node.Y if action", "len(starting_positions) self._possible_starting_nodes = [ MiniGridDoorKeyNode( x, y, dir.value, *self.start_key_position, False,", "self.coordinates_available or ( node.IsDoorOpened and next_coord == self.door_position ): newnode_prms[\"X\"],", "starting_positions.pop(0) starting_positions = [ (x, y, dir) for x, y", "UseObject = 5 class MiniGridDoorKeyDirection(IntEnum): \"\"\"The possible agent direction in", "goal_positions = [] starting_positions = [] for i, j in", "self.other_distribution, ], self.make_reward_stochastic, ) def _instantiate_starting_node_sampler(self) -> NextStateSampler: # noinspection", "self.door_position wall_position = self.wall_position is_wall_horizontal = self.is_wall_horizontal grid = np.zeros((grid_size,", "is_wall_horizontal: grid[i, wall_position] = \"W_en\" else: grid[wall_position, i] = \"W_en\"", "action: IntEnum, next_node: Any ) -> rv_continuous: return ( self.optimal_distribution", "self, seed: int, size: int, randomize_actions: bool = True, lazy:", "\"\"\" if type(optimal_distribution) == tuple: optimal_distribution = get_dist( optimal_distribution[0], optimal_distribution[1:]", "next_coord if action == MiniGridDoorKeyAction.PickObject: if node.X == node.XKey and", "default, it is set to two. optimal_distribution : Union[Tuple, rv_continuous],", "= node.X - 1, node.Y if next_coord in self.coordinates_available or", "else \"C\" ) if self.cur_node.Dir == MiniGridDoorKeyDirection.UP: grid[self.cur_node.Y, self.cur_node.X] =", "\"v\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.LEFT: grid[self.cur_node.Y, self.cur_node.X] = \"<\" return", "= 0 TurnRight = 1 TurnLeft = 2 PickObject =", ") for i in range(self.size): if self.is_wall_horizontal: coords.remove((i, self.wall_position)) else:", "size: int, randomize_actions: bool = True, lazy: float = None,", "= [] starting_positions = [] for i, j in coords.ravel():", "MiniGridDoorKeyDirection.LEFT: next_coord = node.X - 1, node.Y if next_coord ==", "or as a rv_continuous object. \"\"\" if type(optimal_distribution) == tuple:", "= node.X - 1, node.Y if next_coord == self.door_position: newnode_prms[\"IsDoorOpened\"]", "containing Beta parameters or as a rv_continuous object. 
\"\"\" if", "== self.goal_position[1] else self.other_distribution ) def _check_input_parameters(self): super(MiniGridDoorKeyMDP, self)._check_input_parameters() assert", "self.make_reward_stochastic = make_reward_stochastic dists = [ optimal_distribution, other_distribution, ] if", "or as a rv_continuous object. other_distribution : Union[Tuple, rv_continuous] The", "[] for i, j in coords.ravel(): if ( i <", "= 2 LEFT = 3 @dataclass(frozen=True) class MiniGridDoorKeyNode: X: int", "probability of an action not producing any effect on the", "= (3, 5, 7) t_params[\"make_reward_stochastic\"] = (True, False) t_params[\"n_starting_states\"] =", "1) self.optimal_distribution = beta(size ** 2 - 1, 1) else:", "if node.Dir == MiniGridDoorKeyDirection.DOWN: next_coord = node.X, node.Y - 1", "** 2 - 1) self.optimal_distribution = beta(size ** 2 -", "_check_input_parameters(self): super(MiniGridDoorKeyMDP, self)._check_input_parameters() assert self.size >= 3 check_distributions( [ self.optimal_distribution,", "y, dir) for x, y in starting_positions for dir in", "as a rv_continuous object. \"\"\" if type(optimal_distribution) == tuple: optimal_distribution", "if self.is_wall_horizontal else (i, j)) elif ( i > self.wall_position", "if self.cur_node.IsDoorOpened else \"C\" ) if self.cur_node.Dir == MiniGridDoorKeyDirection.UP: grid[self.cur_node.Y,", "either passed as a tuple containing Beta parameters or as", "node.Y if next_coord in self.coordinates_available or ( node.IsDoorOpened and next_coord", "states. randomize_actions : bool, optional whether the effect of the", "== node.YKey: newnode_prms[\"XKey\"] = newnode_prms[\"YKey\"] = -1 if node.XKey ==", "the MDP. size : int the size of the grid.", "lazy: float = None, make_reward_stochastic=False, n_starting_states: int = 2, optimal_distribution:", "MiniGridDoorKeyNode def __init__( self, seed: int, size: int, randomize_actions: bool", "testing_parameters() -> Dict[str, Tuple]: t_params = MDP.testing_parameters() t_params[\"size\"] = (3,", "_calculate_reward_distribution( self, node: Any, action: IntEnum, next_node: Any ) ->", "colosseum.mdps.base_mdp import NextStateSampler from colosseum.mdps.minigrid_rooms.continuous.mdp import MiniGridRoomsContinuous from colosseum.utils.mdps import", "node. It is particularly important to set this value to", "else (i, j)) possible_starting_positions = deepcopy(starting_positions) self._rng.shuffle(goal_positions) self.goal_position = goal_positions[0]", "!= -1: grid[self.cur_node.YKey, self.cur_node.XKey] = \"K\" for i in range(grid_size):", "The distribution of the highly rewarding state. It can be", ") @property def parameters(self) -> Dict[str, Any]: return { **super(MiniGridDoorKeyMDP,", "elif self.cur_node.Dir == MiniGridDoorKeyDirection.DOWN: grid[self.cur_node.Y, self.cur_node.X] = \"v\" elif self.cur_node.Dir", "@property def parameters(self) -> Dict[str, Any]: return { **super(MiniGridDoorKeyMDP, self).parameters,", "of states in the starting distribution. By default, it is", "By default, it is set to two. optimal_distribution : Union[Tuple,", "def _calculate_next_nodes_prms( self, node: MiniGridDoorKeyNode, action: int ) -> Tuple[Tuple[dict,", "def get_node_class() -> Type[MiniGridDoorKeyNode]: return MiniGridDoorKeyNode def __init__( self, seed:", "rv_continuous object. 
\"\"\" if type(optimal_distribution) == tuple: optimal_distribution = get_dist(", "j)) possible_starting_positions = deepcopy(starting_positions) self._rng.shuffle(goal_positions) self.goal_position = goal_positions[0] self._rng.shuffle(starting_positions) self.start_key_position", "= n_starting_states self.size = size self.make_reward_stochastic = make_reward_stochastic dists =", "for dir in MiniGridDoorKeyDirection ] assert self.n_starting_states < len(starting_positions) self._possible_starting_nodes", "= size self.make_reward_stochastic = make_reward_stochastic dists = [ optimal_distribution, other_distribution,", "[ MiniGridDoorKeyNode( x, y, dir.value, *self.start_key_position, False, ) for x,", "not is_wall_horizontal: grid[i, wall_position] = \"W_en\" else: grid[wall_position, i] =", "self.goal_position = goal_positions[0] self._rng.shuffle(starting_positions) self.start_key_position = starting_positions.pop(0) starting_positions = [", "producing any effect on the MDP. size : int the", "as a tuple containing Beta parameters or as a rv_continuous", "Beta parameters or as a rv_continuous object. other_distribution : Union[Tuple,", "self.optimal_distribution = deterministic(1.0) self.other_distribution = deterministic(0.0) super().__init__( seed=seed, randomize_actions=randomize_actions, lazy=lazy,", "effect of the actions changes for every node. It is", "to be made stochastic. By default, it is set to", "other_distribution, ] if dists.count(None) == 0: self.optimal_distribution = optimal_distribution self.other_distribution", "-> Type[MiniGridDoorKeyNode]: return MiniGridDoorKeyNode def __init__( self, seed: int, size:", "other_distribution = get_dist(other_distribution[0], other_distribution[1:]) self.n_starting_states = n_starting_states self.size = size", "= 3 DropObject = 4 UseObject = 5 class MiniGridDoorKeyDirection(IntEnum):", "if action == MiniGridDoorKeyAction.PickObject: if node.X == node.XKey and node.Y", "= self._rng.rand() > 0.5 if self.is_wall_horizontal: self.door_position = self._rng.randint(self.size), self.wall_position", "reaching highly rewarding states in some MDPs by just selecting", "- 1, 1) else: self.optimal_distribution = deterministic(1.0) self.other_distribution = deterministic(0.0)", "-> rv_continuous: return ( self.optimal_distribution if next_node.X == self.goal_position[0] and", "if type(other_distribution) == tuple: other_distribution = get_dist(other_distribution[0], other_distribution[1:]) self.n_starting_states =", "self.wall_position ): starting_positions.append((j, i) if self.is_wall_horizontal else (i, j)) possible_starting_positions", "if make_reward_stochastic: self.other_distribution = beta(1, size ** 2 - 1)", "to set this value to true when doing experiments to", "randomize_actions=randomize_actions, lazy=lazy, **kwargs, ) @property def parameters(self) -> Dict[str, Any]:", "X: int Y: int Dir: MiniGridDoorKeyDirection XKey: int YKey: int", "-> Dict[str, Any]: return { **super(MiniGridDoorKeyMDP, self).parameters, **dict( size=self.size, n_starting_states=self.n_starting_states,", "= self._rng.randint(self.size - 2) + 1 # noinspection PyAttributeOutsideInit self.is_wall_horizontal", "optional checks whether the rewards are to be made stochastic.", "t_params[\"make_reward_stochastic\"] = (True, False) t_params[\"n_starting_states\"] = (1, 4) return t_params", "deterministic(0.0) super().__init__( seed=seed, randomize_actions=randomize_actions, lazy=lazy, **kwargs, ) @property def parameters(self)", "self.is_wall_horizontal: coords.remove((i, 
self.wall_position)) else: coords.remove((self.wall_position, i)) return tuple(coords) @property def", "immediately reaching highly rewarding states in some MDPs by just", "made stochastic. By default, it is set to False. n_starting_states", "1) if node.Dir == MiniGridDoorKeyDirection.RIGHT: next_coord = node.X + 1,", "1.0),) def _calculate_reward_distribution( self, node: Any, action: IntEnum, next_node: Any", "= \" \" grid[self.goal_position[1], self.goal_position[0]] = \"G\" if self.cur_node.XKey !=", "asdict, dataclass from enum import IntEnum from colosseum.utils.random_vars import deterministic,", "( i > self.wall_position if self.is_goal_before else i < self.wall_position", "from colosseum.utils.mdps import check_distributions class MiniGridDoorKeyAction(IntEnum): \"\"\"The action available in", "if action == MiniGridDoorKeyAction.MoveForward: if node.Dir == MiniGridDoorKeyDirection.UP: next_coord =", "# noinspection PyAttributeOutsideInit self.wall_position = self._rng.randint(self.size - 2) + 1", "grid_size), dtype=str) grid[:, :] = \" \" grid[self.goal_position[1], self.goal_position[0]] =", "= 2, optimal_distribution: Union[Tuple, rv_continuous] = None, other_distribution: Union[Tuple, rv_continuous]", "particularly important to set this value to true when doing", "default, it is set to False. n_starting_states : int, optional", "import asdict, dataclass from enum import IntEnum from colosseum.utils.random_vars import", "import Any, Dict, List, Tuple, Type, Union import numpy as", "import NextStateSampler from colosseum.mdps.minigrid_rooms.continuous.mdp import MiniGridRoomsContinuous from colosseum.utils.mdps import check_distributions", "for i, j in coords.ravel(): if ( i < self.wall_position", "- 1 if node.Dir == MiniGridDoorKeyDirection.LEFT: next_coord = node.X -", "self.cur_node.X] = \">\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.DOWN: grid[self.cur_node.Y, self.cur_node.X] =", "__str__(self): return f\"X={self.X},Y={self.Y},Dir={MiniGridDoorKeyDirection(self.Dir).name},XKey={self.XKey},YKey={self.YKey},IsDoorOpened{self.IsDoorOpened}\" class MiniGridDoorKeyMDP(MDP): @staticmethod def testing_parameters() -> Dict[str,", "colosseum.utils.random_vars import deterministic, get_dist try: from functools import cached_property except:", "seed=self._next_seed(), ) def calc_grid_repr(self, node: Any) -> np.array: grid_size =", "action == MiniGridDoorKeyAction.DropObject: newnode_prms[\"XKey\"] = node.X newnode_prms[\"YKey\"] = node.Y if", "type(other_distribution) == tuple: other_distribution = get_dist(other_distribution[0], other_distribution[1:]) self.n_starting_states = n_starting_states", "== MiniGridDoorKeyDirection.RIGHT: grid[self.cur_node.Y, self.cur_node.X] = \">\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.DOWN:", "for x, y in starting_positions for dir in MiniGridDoorKeyDirection ]", "Tuple]: t_params = MDP.testing_parameters() t_params[\"size\"] = (3, 5, 7) t_params[\"make_reward_stochastic\"]", "to avoid immediately reaching highly rewarding states in some MDPs", "= (node.Dir - 1) % 4 if action == MiniGridDoorKeyAction.MoveForward:", "self.optimal_distribution = beta(size ** 2 - 1, 1) else: self.optimal_distribution", "2, optimal_distribution: Union[Tuple, rv_continuous] = None, other_distribution: Union[Tuple, rv_continuous] =", "1) % 4 if action == MiniGridDoorKeyAction.MoveForward: if node.Dir ==", "from colosseum.mdps.base_mdp import NextStateSampler from colosseum.mdps.minigrid_rooms.continuous.mdp import MiniGridRoomsContinuous from 
colosseum.utils.mdps", "if self.is_wall_horizontal else (i, j)) possible_starting_positions = deepcopy(starting_positions) self._rng.shuffle(goal_positions) self.goal_position", "(0, 0)) goal_positions = [] starting_positions = [] for i,", "= \"^\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.RIGHT: grid[self.cur_node.Y, self.cur_node.X] = \">\"", "): \"\"\" Parameters ---------- seed : int the seed used", "0)) goal_positions = [] starting_positions = [] for i, j", "5 class MiniGridDoorKeyDirection(IntEnum): \"\"\"The possible agent direction in the MiniGridDoorKey", "in range(self.size): if self.is_wall_horizontal: coords.remove((i, self.wall_position)) else: coords.remove((self.wall_position, i)) return", "deterministic, get_dist try: from functools import cached_property except: from backports.cached_property", "self.cur_node.IsDoorOpened else \"C\" ) if self.cur_node.Dir == MiniGridDoorKeyDirection.UP: grid[self.cur_node.Y, self.cur_node.X]", "MiniGridDoorKey MDP.\"\"\" MoveForward = 0 TurnRight = 1 TurnLeft =", "1) else: self.optimal_distribution = deterministic(1.0) self.other_distribution = deterministic(0.0) super().__init__( seed=seed,", "self._rng.randint(self.size - 2) + 1 # noinspection PyAttributeOutsideInit self.is_wall_horizontal =", "MoveForward = 0 TurnRight = 1 TurnLeft = 2 PickObject", "PickObject = 3 DropObject = 4 UseObject = 5 class", "in some MDPs by just selecting the same action repeatedly.", ": float the probability of an action not producing any", "from colosseum.utils.random_vars import deterministic, get_dist try: from functools import cached_property", "== MiniGridDoorKeyAction.PickObject: if node.X == node.XKey and node.Y == node.YKey:", "= 5 class MiniGridDoorKeyDirection(IntEnum): \"\"\"The possible agent direction in the", "It is particularly important to set this value to true", "Any]: return { **super(MiniGridDoorKeyMDP, self).parameters, **dict( size=self.size, n_starting_states=self.n_starting_states, optimal_distribution=self.optimal_distribution, other_distribution=self.other_distribution,", "import deepcopy from dataclasses import asdict, dataclass from enum import", "rv_continuous] The distribution of the non highly rewarding states. It", "(True, False) t_params[\"n_starting_states\"] = (1, 4) return t_params @staticmethod def", "int, randomize_actions: bool = True, lazy: float = None, make_reward_stochastic=False,", "self.start_key_position = starting_positions.pop(0) starting_positions = [ (x, y, dir) for", "seed used for sampling rewards and next states. randomize_actions :", "_calculate_next_nodes_prms( self, node: MiniGridDoorKeyNode, action: int ) -> Tuple[Tuple[dict, float],", "node.Y if action == MiniGridDoorKeyAction.UseObject: if node.Dir == MiniGridDoorKeyDirection.UP: next_coord", "i] = \"W_en\" grid[door_position[1], door_position[0]] = ( \"O\" if self.cur_node.IsDoorOpened", "this value to true when doing experiments to avoid immediately", "optimal_distribution : Union[Tuple, rv_continuous], optional The distribution of the highly", "= MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0)) goal_positions = [] starting_positions = []", "set to true. 
lazy : float the probability of an", "DOWN = 2 LEFT = 3 @dataclass(frozen=True) class MiniGridDoorKeyNode: X:", "MiniGridDoorKeyNode, action: int ) -> Tuple[Tuple[dict, float], ...]: newnode_prms =", "t_params = MDP.testing_parameters() t_params[\"size\"] = (3, 5, 7) t_params[\"make_reward_stochastic\"] =", "- 1, node.Y if next_coord == self.door_position: newnode_prms[\"IsDoorOpened\"] = True", "not node.IsDoorOpened: if action == MiniGridDoorKeyAction.DropObject: newnode_prms[\"XKey\"] = node.X newnode_prms[\"YKey\"]", "MiniGridDoorKeyDirection.LEFT: next_coord = node.X - 1, node.Y if next_coord in", "+ 1 # noinspection PyAttributeOutsideInit self.is_wall_horizontal = self._rng.rand() > 0.5", "tuple: optimal_distribution = get_dist( optimal_distribution[0], optimal_distribution[1:] ) if type(other_distribution) ==", "self.other_distribution = beta(1, size ** 2 - 1) self.optimal_distribution =", "MiniGridDoorKeyAction.TurnLeft: newnode_prms[\"Dir\"] = (node.Dir - 1) % 4 if action", "1, node.Y if next_coord == self.door_position: newnode_prms[\"IsDoorOpened\"] = True return", "\"K\" for i in range(grid_size): if not is_wall_horizontal: grid[i, wall_position]", "on the MDP. size : int the size of the", "By default, it is set to true. lazy : float", "grid_size = self.size door_position = self.door_position wall_position = self.wall_position is_wall_horizontal", "MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0)) goal_positions = [] starting_positions = [] for", "True return ((newnode_prms, 1.0),) def _calculate_reward_distribution( self, node: Any, action:", "( self.optimal_distribution if next_node.X == self.goal_position[0] and next_node.Y == self.goal_position[1]", "in starting_positions ] return NextStateSampler( next_states=self._possible_starting_nodes[: self.n_starting_states], probs=[1 / self.n_starting_states", "(1, 4) return t_params @staticmethod def get_node_class() -> Type[MiniGridDoorKeyNode]: return", "true. 
lazy : float the probability of an action not", "3 check_distributions( [ self.optimal_distribution, self.other_distribution, ], self.make_reward_stochastic, ) def _instantiate_starting_node_sampler(self)", "action available in the MiniGridDoorKey MDP.\"\"\" MoveForward = 0 TurnRight", "= get_dist( optimal_distribution[0], optimal_distribution[1:] ) if type(other_distribution) == tuple: other_distribution", "self.n_starting_states], probs=[1 / self.n_starting_states for _ in range(self.n_starting_states)], seed=self._next_seed(), )", "in range(grid_size): if not is_wall_horizontal: grid[i, wall_position] = \"W_en\" else:", "== -1 and not node.IsDoorOpened: if action == MiniGridDoorKeyAction.DropObject: newnode_prms[\"XKey\"]", ") def _check_input_parameters(self): super(MiniGridDoorKeyMDP, self)._check_input_parameters() assert self.size >= 3 check_distributions(", "7) t_params[\"make_reward_stochastic\"] = (True, False) t_params[\"n_starting_states\"] = (1, 4) return", "make_reward_stochastic: self.other_distribution = beta(1, size ** 2 - 1) self.optimal_distribution", "lazy : float the probability of an action not producing", "self.optimal_distribution if next_node.X == self.goal_position[0] and next_node.Y == self.goal_position[1] else", "self.wall_position ): goal_positions.append((j, i) if self.is_wall_horizontal else (i, j)) elif", "\" \" grid[self.goal_position[1], self.goal_position[0]] = \"G\" if self.cur_node.XKey != -1:", "self._rng.randint(self.size), self.wall_position else: self.door_position = self.wall_position, self._rng.randint(self.size) self.is_goal_before = self._rng.rand()", "check_distributions( [ self.optimal_distribution, self.other_distribution, ], self.make_reward_stochastic, ) def _instantiate_starting_node_sampler(self) ->", "dir) for x, y in starting_positions for dir in MiniGridDoorKeyDirection", "self.is_goal_before = self._rng.rand() > 0.5 coords = MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0))", "newnode_prms = deepcopy(asdict(node)) if action == MiniGridDoorKeyAction.TurnRight: newnode_prms[\"Dir\"] = (node.Dir", "if node.Dir == MiniGridDoorKeyDirection.LEFT: next_coord = node.X - 1, node.Y", "2 - 1, 1) else: self.optimal_distribution = deterministic(1.0) self.other_distribution =", "1 DOWN = 2 LEFT = 3 @dataclass(frozen=True) class MiniGridDoorKeyNode:", "node.Y - 1 if node.Dir == MiniGridDoorKeyDirection.LEFT: next_coord = node.X", "node.X + 1, node.Y if node.Dir == MiniGridDoorKeyDirection.DOWN: next_coord =", "== self.door_position ): newnode_prms[\"X\"], newnode_prms[\"Y\"] = next_coord if action ==", "= True return ((newnode_prms, 1.0),) def _calculate_reward_distribution( self, node: Any,", "beta(1, size ** 2 - 1) self.optimal_distribution = beta(size **", "range(self.size): if self.is_wall_horizontal: coords.remove((i, self.wall_position)) else: coords.remove((self.wall_position, i)) return tuple(coords)", "@staticmethod def get_node_class() -> Type[MiniGridDoorKeyNode]: return MiniGridDoorKeyNode def __init__( self,", "= self.wall_position, self._rng.randint(self.size) self.is_goal_before = self._rng.rand() > 0.5 coords =", "*self.start_key_position, False, ) for x, y, dir in starting_positions ]", "\"^\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.RIGHT: grid[self.cur_node.Y, self.cur_node.X] = \">\" elif", "( i < self.wall_position if self.is_goal_before else i > self.wall_position", "Dir: MiniGridDoorKeyDirection XKey: int YKey: int IsDoorOpened: bool def __str__(self):", "class MiniGridDoorKeyMDP(MDP): 
@staticmethod def testing_parameters() -> Dict[str, Tuple]: t_params =", "self.goal_position[1] else self.other_distribution ) def _check_input_parameters(self): super(MiniGridDoorKeyMDP, self)._check_input_parameters() assert self.size", "_ in range(self.n_starting_states)], seed=self._next_seed(), ) def calc_grid_repr(self, node: Any) ->", "= None, **kwargs, ): \"\"\" Parameters ---------- seed : int", "are to be made stochastic. By default, it is set", "of the non highly rewarding states. It can be either", "0: self.optimal_distribution = optimal_distribution self.other_distribution = other_distribution else: if make_reward_stochastic:", "== MiniGridDoorKeyDirection.LEFT: next_coord = node.X - 1, node.Y if next_coord", "= make_reward_stochastic dists = [ optimal_distribution, other_distribution, ] if dists.count(None)", "bool def __str__(self): return f\"X={self.X},Y={self.Y},Dir={MiniGridDoorKeyDirection(self.Dir).name},XKey={self.XKey},YKey={self.YKey},IsDoorOpened{self.IsDoorOpened}\" class MiniGridDoorKeyMDP(MDP): @staticmethod def testing_parameters()", "int ) -> Tuple[Tuple[dict, float], ...]: newnode_prms = deepcopy(asdict(node)) if", "else: if make_reward_stochastic: self.other_distribution = beta(1, size ** 2 -", "MiniGridDoorKeyDirection(IntEnum): \"\"\"The possible agent direction in the MiniGridDoorKey MDP.\"\"\" UP", "dir in starting_positions ] return NextStateSampler( next_states=self._possible_starting_nodes[: self.n_starting_states], probs=[1 /", "Union import numpy as np from scipy.stats import beta, rv_continuous", "= 1 TurnLeft = 2 PickObject = 3 DropObject =", "node.XKey and node.Y == node.YKey: newnode_prms[\"XKey\"] = newnode_prms[\"YKey\"] = -1", "+ 1, node.Y if node.Dir == MiniGridDoorKeyDirection.DOWN: next_coord = node.X,", "deepcopy from dataclasses import asdict, dataclass from enum import IntEnum", "== MiniGridDoorKeyAction.UseObject: if node.Dir == MiniGridDoorKeyDirection.UP: next_coord = (node.X, node.Y", "some MDPs by just selecting the same action repeatedly. By", "just selecting the same action repeatedly. 
By default, it is", "else: self.optimal_distribution = deterministic(1.0) self.other_distribution = deterministic(0.0) super().__init__( seed=seed, randomize_actions=randomize_actions,", "(node.Dir - 1) % 4 if action == MiniGridDoorKeyAction.MoveForward: if", "self.n_starting_states for _ in range(self.n_starting_states)], seed=self._next_seed(), ) def calc_grid_repr(self, node:", "- 1, node.Y if next_coord in self.coordinates_available or ( node.IsDoorOpened", "next_node.Y == self.goal_position[1] else self.other_distribution ) def _check_input_parameters(self): super(MiniGridDoorKeyMDP, self)._check_input_parameters()", "size ** 2 - 1) self.optimal_distribution = beta(size ** 2", "import check_distributions class MiniGridDoorKeyAction(IntEnum): \"\"\"The action available in the MiniGridDoorKey", "return ((newnode_prms, 1.0),) def _calculate_reward_distribution( self, node: Any, action: IntEnum,", "MiniGridDoorKeyAction.DropObject: newnode_prms[\"XKey\"] = node.X newnode_prms[\"YKey\"] = node.Y if action ==", "to true when doing experiments to avoid immediately reaching highly", "if self.cur_node.XKey != -1: grid[self.cur_node.YKey, self.cur_node.XKey] = \"K\" for i", "colosseum.mdps.minigrid_rooms.continuous.mdp import MiniGridRoomsContinuous from colosseum.utils.mdps import check_distributions class MiniGridDoorKeyAction(IntEnum): \"\"\"The", "- 1) self.optimal_distribution = beta(size ** 2 - 1, 1)", "return tuple(coords) @property def num_actions(self): return len(MiniGridDoorKeyAction) def _calculate_next_nodes_prms( self,", "is set to False. n_starting_states : int, optional the number", "= (node.Dir + 1) % 4 if action == MiniGridDoorKeyAction.TurnLeft:", "if self.is_goal_before else i > self.wall_position ): goal_positions.append((j, i) if", "@property def possible_starting_nodes(self) -> List[MiniGridDoorKeyNode]: return self._possible_starting_nodes @cached_property def coordinates_available(self):", "next_coord = node.X - 1, node.Y if next_coord in self.coordinates_available", "node.X == node.XKey and node.Y == node.YKey: newnode_prms[\"XKey\"] = newnode_prms[\"YKey\"]", "dir in MiniGridDoorKeyDirection ] assert self.n_starting_states < len(starting_positions) self._possible_starting_nodes =", "self.size door_position = self.door_position wall_position = self.wall_position is_wall_horizontal = self.is_wall_horizontal", "from colosseum.mdps.minigrid_rooms.continuous.mdp import MiniGridRoomsContinuous from colosseum.utils.mdps import check_distributions class MiniGridDoorKeyAction(IntEnum):", "starting distribution. By default, it is set to two. optimal_distribution", "any effect on the MDP. 
size : int the size", "MiniGridDoorKeyNode: X: int Y: int Dir: MiniGridDoorKeyDirection XKey: int YKey:", "optimal_distribution, other_distribution, ] if dists.count(None) == 0: self.optimal_distribution = optimal_distribution", "node.IsDoorOpened: if action == MiniGridDoorKeyAction.DropObject: newnode_prms[\"XKey\"] = node.X newnode_prms[\"YKey\"] =", "if next_coord == self.door_position: newnode_prms[\"IsDoorOpened\"] = True return ((newnode_prms, 1.0),)", "= (True, False) t_params[\"n_starting_states\"] = (1, 4) return t_params @staticmethod", "n_starting_states self.size = size self.make_reward_stochastic = make_reward_stochastic dists = [", "= self._rng.rand() > 0.5 coords = MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0)) goal_positions", "float = None, make_reward_stochastic=False, n_starting_states: int = 2, optimal_distribution: Union[Tuple,", "= next_coord if action == MiniGridDoorKeyAction.PickObject: if node.X == node.XKey", "wall_position = self.wall_position is_wall_horizontal = self.is_wall_horizontal grid = np.zeros((grid_size, grid_size),", "direction in the MiniGridDoorKey MDP.\"\"\" UP = 0 RIGHT =", "self.make_reward_stochastic, ) def _instantiate_starting_node_sampler(self) -> NextStateSampler: # noinspection PyAttributeOutsideInit self.wall_position", "next_coord == self.door_position: newnode_prms[\"IsDoorOpened\"] = True return ((newnode_prms, 1.0),) def", "i, j in coords.ravel(): if ( i < self.wall_position if", "The distribution of the non highly rewarding states. It can", ") -> Tuple[Tuple[dict, float], ...]: newnode_prms = deepcopy(asdict(node)) if action", "self.goal_position[0] and next_node.Y == self.goal_position[1] else self.other_distribution ) def _check_input_parameters(self):", "] if dists.count(None) == 0: self.optimal_distribution = optimal_distribution self.other_distribution =", "starting_positions for dir in MiniGridDoorKeyDirection ] assert self.n_starting_states < len(starting_positions)", "2 LEFT = 3 @dataclass(frozen=True) class MiniGridDoorKeyNode: X: int Y:", "...]: newnode_prms = deepcopy(asdict(node)) if action == MiniGridDoorKeyAction.TurnRight: newnode_prms[\"Dir\"] =", "dtype=str) grid[:, :] = \" \" grid[self.goal_position[1], self.goal_position[0]] = \"G\"", "node.X newnode_prms[\"YKey\"] = node.Y if action == MiniGridDoorKeyAction.UseObject: if node.Dir", "<gh_stars>0 from copy import deepcopy from dataclasses import asdict, dataclass", "set to False. n_starting_states : int, optional the number of", "if node.X == node.XKey and node.Y == node.YKey: newnode_prms[\"XKey\"] =", "states in the starting distribution. By default, it is set", "assert self.n_starting_states < len(starting_positions) self._possible_starting_nodes = [ MiniGridDoorKeyNode( x, y,", "@cached_property def coordinates_available(self): coords = ( MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0)) .ravel()", "as a rv_continuous object. 
other_distribution : Union[Tuple, rv_continuous] The distribution", "0 RIGHT = 1 DOWN = 2 LEFT = 3", "< self.wall_position ): starting_positions.append((j, i) if self.is_wall_horizontal else (i, j))", "n_starting_states=self.n_starting_states, optimal_distribution=self.optimal_distribution, other_distribution=self.other_distribution, ), } @property def possible_starting_nodes(self) -> List[MiniGridDoorKeyNode]:", "== self.goal_position[0] and next_node.Y == self.goal_position[1] else self.other_distribution ) def", "goal_positions.append((j, i) if self.is_wall_horizontal else (i, j)) elif ( i", "coords.remove((i, self.wall_position)) else: coords.remove((self.wall_position, i)) return tuple(coords) @property def num_actions(self):", "node.YKey: newnode_prms[\"XKey\"] = newnode_prms[\"YKey\"] = -1 if node.XKey == -1", "range(grid_size): if not is_wall_horizontal: grid[i, wall_position] = \"W_en\" else: grid[wall_position,", "copy import deepcopy from dataclasses import asdict, dataclass from enum", "if next_node.X == self.goal_position[0] and next_node.Y == self.goal_position[1] else self.other_distribution", "= \"W_en\" grid[door_position[1], door_position[0]] = ( \"O\" if self.cur_node.IsDoorOpened else", "\"\"\" Parameters ---------- seed : int the seed used for", "**kwargs, ) @property def parameters(self) -> Dict[str, Any]: return {", "/ self.n_starting_states for _ in range(self.n_starting_states)], seed=self._next_seed(), ) def calc_grid_repr(self,", "((newnode_prms, 1.0),) def _calculate_reward_distribution( self, node: Any, action: IntEnum, next_node:", "seed: int, size: int, randomize_actions: bool = True, lazy: float", "rv_continuous] = None, other_distribution: Union[Tuple, rv_continuous] = None, **kwargs, ):", "the actions changes for every node. It is particularly important", "beta(size ** 2 - 1, 1) else: self.optimal_distribution = deterministic(1.0)", "next_coord = (node.X, node.Y + 1) if node.Dir == MiniGridDoorKeyDirection.RIGHT:", ") for x, y, dir in starting_positions ] return NextStateSampler(", "return MiniGridDoorKeyNode def __init__( self, seed: int, size: int, randomize_actions:", "float], ...]: newnode_prms = deepcopy(asdict(node)) if action == MiniGridDoorKeyAction.TurnRight: newnode_prms[\"Dir\"]", "= 2 PickObject = 3 DropObject = 4 UseObject =", "= (1, 4) return t_params @staticmethod def get_node_class() -> Type[MiniGridDoorKeyNode]:", "XKey: int YKey: int IsDoorOpened: bool def __str__(self): return f\"X={self.X},Y={self.Y},Dir={MiniGridDoorKeyDirection(self.Dir).name},XKey={self.XKey},YKey={self.YKey},IsDoorOpened{self.IsDoorOpened}\"", "0 TurnRight = 1 TurnLeft = 2 PickObject = 3", "it is set to two. optimal_distribution : Union[Tuple, rv_continuous], optional", "range(self.n_starting_states)], seed=self._next_seed(), ) def calc_grid_repr(self, node: Any) -> np.array: grid_size", "node: Any, action: IntEnum, next_node: Any ) -> rv_continuous: return", "import MDP from colosseum.mdps.base_mdp import NextStateSampler from colosseum.mdps.minigrid_rooms.continuous.mdp import MiniGridRoomsContinuous", "import cached_property except: from backports.cached_property import cached_property from typing import", "t_params[\"size\"] = (3, 5, 7) t_params[\"make_reward_stochastic\"] = (True, False) t_params[\"n_starting_states\"]", "i > self.wall_position ): goal_positions.append((j, i) if self.is_wall_horizontal else (i,", "-1: grid[self.cur_node.YKey, self.cur_node.XKey] = \"K\" for i in range(grid_size): if", "is set to true. 
lazy : float the probability of", "node.Dir == MiniGridDoorKeyDirection.UP: next_coord = (node.X, node.Y + 1) if", "- 2) + 1 # noinspection PyAttributeOutsideInit self.is_wall_horizontal = self._rng.rand()", "optional whether the effect of the actions changes for every", "noinspection PyAttributeOutsideInit self.is_wall_horizontal = self._rng.rand() > 0.5 if self.is_wall_horizontal: self.door_position", "int Dir: MiniGridDoorKeyDirection XKey: int YKey: int IsDoorOpened: bool def", "PyAttributeOutsideInit self.wall_position = self._rng.randint(self.size - 2) + 1 # noinspection", "from functools import cached_property except: from backports.cached_property import cached_property from", "wall_position] = \"W_en\" else: grid[wall_position, i] = \"W_en\" grid[door_position[1], door_position[0]]", "def __init__( self, seed: int, size: int, randomize_actions: bool =", "# noinspection PyAttributeOutsideInit self.is_wall_horizontal = self._rng.rand() > 0.5 if self.is_wall_horizontal:", "size=self.size, n_starting_states=self.n_starting_states, optimal_distribution=self.optimal_distribution, other_distribution=self.other_distribution, ), } @property def possible_starting_nodes(self) ->", "in MiniGridDoorKeyDirection ] assert self.n_starting_states < len(starting_positions) self._possible_starting_nodes = [", "= True, lazy: float = None, make_reward_stochastic=False, n_starting_states: int =", "@staticmethod def testing_parameters() -> Dict[str, Tuple]: t_params = MDP.testing_parameters() t_params[\"size\"]", "self.is_wall_horizontal = self._rng.rand() > 0.5 if self.is_wall_horizontal: self.door_position = self._rng.randint(self.size),", "== MiniGridDoorKeyDirection.RIGHT: next_coord = node.X + 1, node.Y if node.Dir", "Union[Tuple, rv_continuous], optional The distribution of the highly rewarding state.", ") if type(other_distribution) == tuple: other_distribution = get_dist(other_distribution[0], other_distribution[1:]) self.n_starting_states", "MiniGridDoorKeyAction.MoveForward: if node.Dir == MiniGridDoorKeyDirection.UP: next_coord = (node.X, node.Y +", "< len(starting_positions) self._possible_starting_nodes = [ MiniGridDoorKeyNode( x, y, dir.value, *self.start_key_position,", "default, it is set to true. lazy : float the", "self).parameters, **dict( size=self.size, n_starting_states=self.n_starting_states, optimal_distribution=self.optimal_distribution, other_distribution=self.other_distribution, ), } @property def", "= [ MiniGridDoorKeyNode( x, y, dir.value, *self.start_key_position, False, ) for", "self.is_wall_horizontal grid = np.zeros((grid_size, grid_size), dtype=str) grid[:, :] = \"", "Dict, List, Tuple, Type, Union import numpy as np from", "possible_starting_nodes(self) -> List[MiniGridDoorKeyNode]: return self._possible_starting_nodes @cached_property def coordinates_available(self): coords =", "= [ optimal_distribution, other_distribution, ] if dists.count(None) == 0: self.optimal_distribution", "MDP from colosseum.mdps.base_mdp import NextStateSampler from colosseum.mdps.minigrid_rooms.continuous.mdp import MiniGridRoomsContinuous from", "return t_params @staticmethod def get_node_class() -> Type[MiniGridDoorKeyNode]: return MiniGridDoorKeyNode def", "1 if node.Dir == MiniGridDoorKeyDirection.LEFT: next_coord = node.X - 1,", "MiniGridDoorKeyNode( x, y, dir.value, *self.start_key_position, False, ) for x, y,", "j)) elif ( i > self.wall_position if self.is_goal_before else i", "if ( i < self.wall_position if self.is_goal_before else i >", "the size of the grid. 
make_reward_stochastic : bool, optional checks", "if node.Dir == MiniGridDoorKeyDirection.RIGHT: next_coord = node.X + 1, node.Y", "def _instantiate_starting_node_sampler(self) -> NextStateSampler: # noinspection PyAttributeOutsideInit self.wall_position = self._rng.randint(self.size", "= ( \"O\" if self.cur_node.IsDoorOpened else \"C\" ) if self.cur_node.Dir", "= [ (x, y, dir) for x, y in starting_positions", "int = 2, optimal_distribution: Union[Tuple, rv_continuous] = None, other_distribution: Union[Tuple,", "rv_continuous] = None, **kwargs, ): \"\"\" Parameters ---------- seed :", "4) return t_params @staticmethod def get_node_class() -> Type[MiniGridDoorKeyNode]: return MiniGridDoorKeyNode", "number of states in the starting distribution. By default, it", "], self.make_reward_stochastic, ) def _instantiate_starting_node_sampler(self) -> NextStateSampler: # noinspection PyAttributeOutsideInit", "\">\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.DOWN: grid[self.cur_node.Y, self.cur_node.X] = \"v\" elif", "Union[Tuple, rv_continuous] = None, other_distribution: Union[Tuple, rv_continuous] = None, **kwargs,", "newnode_prms[\"YKey\"] = -1 if node.XKey == -1 and not node.IsDoorOpened:", "optimal_distribution=self.optimal_distribution, other_distribution=self.other_distribution, ), } @property def possible_starting_nodes(self) -> List[MiniGridDoorKeyNode]: return", "== MiniGridDoorKeyAction.TurnLeft: newnode_prms[\"Dir\"] = (node.Dir - 1) % 4 if", "MiniGridRoomsContinuous from colosseum.utils.mdps import check_distributions class MiniGridDoorKeyAction(IntEnum): \"\"\"The action available", "action == MiniGridDoorKeyAction.PickObject: if node.X == node.XKey and node.Y ==", "size self.make_reward_stochastic = make_reward_stochastic dists = [ optimal_distribution, other_distribution, ]", "action == MiniGridDoorKeyAction.TurnRight: newnode_prms[\"Dir\"] = (node.Dir + 1) % 4", "same action repeatedly. By default, it is set to true.", "MiniGridDoorKeyDirection XKey: int YKey: int IsDoorOpened: bool def __str__(self): return", "an action not producing any effect on the MDP. 
size", "< self.wall_position if self.is_goal_before else i > self.wall_position ): goal_positions.append((j,", "= get_dist(other_distribution[0], other_distribution[1:]) self.n_starting_states = n_starting_states self.size = size self.make_reward_stochastic", "lazy=lazy, **kwargs, ) @property def parameters(self) -> Dict[str, Any]: return", "Any) -> np.array: grid_size = self.size door_position = self.door_position wall_position", "MiniGridDoorKeyDirection ] assert self.n_starting_states < len(starting_positions) self._possible_starting_nodes = [ MiniGridDoorKeyNode(", "= goal_positions[0] self._rng.shuffle(starting_positions) self.start_key_position = starting_positions.pop(0) starting_positions = [ (x,", "4 if action == MiniGridDoorKeyAction.MoveForward: if node.Dir == MiniGridDoorKeyDirection.UP: next_coord", "= node.X, node.Y - 1 if node.Dir == MiniGridDoorKeyDirection.LEFT: next_coord", "grid[self.goal_position[1], self.goal_position[0]] = \"G\" if self.cur_node.XKey != -1: grid[self.cur_node.YKey, self.cur_node.XKey]", "0)) .ravel() .tolist() ) for i in range(self.size): if self.is_wall_horizontal:", "): goal_positions.append((j, i) if self.is_wall_horizontal else (i, j)) elif (", "x, y, dir in starting_positions ] return NextStateSampler( next_states=self._possible_starting_nodes[: self.n_starting_states],", "def __str__(self): return f\"X={self.X},Y={self.Y},Dir={MiniGridDoorKeyDirection(self.Dir).name},XKey={self.XKey},YKey={self.YKey},IsDoorOpened{self.IsDoorOpened}\" class MiniGridDoorKeyMDP(MDP): @staticmethod def testing_parameters() ->", "available in the MiniGridDoorKey MDP.\"\"\" MoveForward = 0 TurnRight =", "for x, y, dir in starting_positions ] return NextStateSampler( next_states=self._possible_starting_nodes[:", "by just selecting the same action repeatedly. By default, it", "get_node_class() -> Type[MiniGridDoorKeyNode]: return MiniGridDoorKeyNode def __init__( self, seed: int,", "action: int ) -> Tuple[Tuple[dict, float], ...]: newnode_prms = deepcopy(asdict(node))", "Parameters ---------- seed : int the seed used for sampling", "self.wall_position if self.is_goal_before else i < self.wall_position ): starting_positions.append((j, i)", "check_distributions class MiniGridDoorKeyAction(IntEnum): \"\"\"The action available in the MiniGridDoorKey MDP.\"\"\"", "List, Tuple, Type, Union import numpy as np from scipy.stats", "bool, optional checks whether the rewards are to be made", "or ( node.IsDoorOpened and next_coord == self.door_position ): newnode_prms[\"X\"], newnode_prms[\"Y\"]", "next states. 
randomize_actions : bool, optional whether the effect of", "next_node: Any ) -> rv_continuous: return ( self.optimal_distribution if next_node.X", "beta, rv_continuous from colosseum.mdps import MDP from colosseum.mdps.base_mdp import NextStateSampler", "self.other_distribution = deterministic(0.0) super().__init__( seed=seed, randomize_actions=randomize_actions, lazy=lazy, **kwargs, ) @property", "int Y: int Dir: MiniGridDoorKeyDirection XKey: int YKey: int IsDoorOpened:", "dists.count(None) == 0: self.optimal_distribution = optimal_distribution self.other_distribution = other_distribution else:", "self.cur_node.Dir == MiniGridDoorKeyDirection.RIGHT: grid[self.cur_node.Y, self.cur_node.X] = \">\" elif self.cur_node.Dir ==", ") def _instantiate_starting_node_sampler(self) -> NextStateSampler: # noinspection PyAttributeOutsideInit self.wall_position =", "functools import cached_property except: from backports.cached_property import cached_property from typing", "the starting distribution. By default, it is set to two.", "to False. n_starting_states : int, optional the number of states", "[ optimal_distribution, other_distribution, ] if dists.count(None) == 0: self.optimal_distribution =", "distribution. By default, it is set to two. optimal_distribution :", "Y: int Dir: MiniGridDoorKeyDirection XKey: int YKey: int IsDoorOpened: bool", "Type[MiniGridDoorKeyNode]: return MiniGridDoorKeyNode def __init__( self, seed: int, size: int,", "whether the effect of the actions changes for every node.", "MDP. size : int the size of the grid. make_reward_stochastic", "int YKey: int IsDoorOpened: bool def __str__(self): return f\"X={self.X},Y={self.Y},Dir={MiniGridDoorKeyDirection(self.Dir).name},XKey={self.XKey},YKey={self.YKey},IsDoorOpened{self.IsDoorOpened}\" class", "dists = [ optimal_distribution, other_distribution, ] if dists.count(None) == 0:", "dataclasses import asdict, dataclass from enum import IntEnum from colosseum.utils.random_vars", "to true. lazy : float the probability of an action", "**super(MiniGridDoorKeyMDP, self).parameters, **dict( size=self.size, n_starting_states=self.n_starting_states, optimal_distribution=self.optimal_distribution, other_distribution=self.other_distribution, ), } @property", "MiniGridDoorKeyAction.PickObject: if node.X == node.XKey and node.Y == node.YKey: newnode_prms[\"XKey\"]", "UP = 0 RIGHT = 1 DOWN = 2 LEFT", "(node.X, node.Y + 1) if node.Dir == MiniGridDoorKeyDirection.RIGHT: next_coord =", "2) + 1 # noinspection PyAttributeOutsideInit self.is_wall_horizontal = self._rng.rand() >", "of the grid. make_reward_stochastic : bool, optional checks whether the", "True, lazy: float = None, make_reward_stochastic=False, n_starting_states: int = 2,", "used for sampling rewards and next states. randomize_actions : bool,", "self.optimal_distribution = optimal_distribution self.other_distribution = other_distribution else: if make_reward_stochastic: self.other_distribution", "next_node.X == self.goal_position[0] and next_node.Y == self.goal_position[1] else self.other_distribution )", "deepcopy(asdict(node)) if action == MiniGridDoorKeyAction.TurnRight: newnode_prms[\"Dir\"] = (node.Dir + 1)", "self.cur_node.X] = \"^\" elif self.cur_node.Dir == MiniGridDoorKeyDirection.RIGHT: grid[self.cur_node.Y, self.cur_node.X] =", "cached_property except: from backports.cached_property import cached_property from typing import Any,", "node: MiniGridDoorKeyNode, action: int ) -> Tuple[Tuple[dict, float], ...]: newnode_prms", "is set to two. 
# MiniGridDoorKey MDP (colosseum), reconstructed from the overlapping fragments above.

from copy import deepcopy
from dataclasses import asdict, dataclass
from enum import IntEnum

from colosseum.utils.random_vars import deterministic, get_dist

try:
    from functools import cached_property
except:
    from backports.cached_property import cached_property

from typing import Any, Dict, List, Tuple, Type, Union

import numpy as np
from scipy.stats import beta, rv_continuous

from colosseum.mdps import MDP
from colosseum.mdps.base_mdp import NextStateSampler
from colosseum.mdps.minigrid_rooms.continuous.mdp import MiniGridRoomsContinuous
from colosseum.utils.mdps import check_distributions


class MiniGridDoorKeyAction(IntEnum):
    """The actions available in the MiniGridDoorKey MDP."""

    MoveForward = 0
    TurnRight = 1
    TurnLeft = 2
    PickObject = 3
    DropObject = 4
    UseObject = 5


class MiniGridDoorKeyDirection(IntEnum):
    """The possible agent directions in the MiniGridDoorKey MDP."""

    UP = 0
    RIGHT = 1
    DOWN = 2
    LEFT = 3


@dataclass(frozen=True)
class MiniGridDoorKeyNode:
    X: int
    Y: int
    Dir: MiniGridDoorKeyDirection
    XKey: int
    YKey: int
    IsDoorOpened: bool

    def __str__(self):
        return (
            f"X={self.X},Y={self.Y},Dir={MiniGridDoorKeyDirection(self.Dir).name},"
            f"XKey={self.XKey},YKey={self.YKey},IsDoorOpened{self.IsDoorOpened}"
        )


class MiniGridDoorKeyMDP(MDP):
    @staticmethod
    def testing_parameters() -> Dict[str, Tuple]:
        t_params = MDP.testing_parameters()
        t_params["size"] = (3, 5, 7)
        t_params["make_reward_stochastic"] = (True, False)
        t_params["n_starting_states"] = (1, 4)
        return t_params

    @staticmethod
    def get_node_class() -> Type[MiniGridDoorKeyNode]:
        return MiniGridDoorKeyNode

    def __init__(
        self,
        seed: int,
        size: int,
        randomize_actions: bool = True,
        lazy: bool = None,
        make_reward_stochastic=False,
        n_starting_states: int = 2,
        optimal_distribution: Union[Tuple, rv_continuous] = None,
        other_distribution: Union[Tuple, rv_continuous] = None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        seed : int
            the seed used for sampling rewards and next states.
        randomize_actions : bool, optional
            whether the effect of the actions changes for every node. It is
            particularly important to set this value to true when doing
            experiments to avoid immediately reaching highly rewarding states
            in some MDPs by just selecting the same action repeatedly. By
            default, it is set to True.
        size : int
            the size of the grid.
        make_reward_stochastic : bool, optional
            checks whether the rewards are to be made stochastic. By default,
            it is set to False.
        n_starting_states : int, optional
            the number of states in the starting distribution. By default, it
            is set to two.
        optimal_distribution : Union[Tuple, rv_continuous], optional
            The distribution of the highly rewarding state. It can be either
            passed as a tuple containing Beta parameters or as a rv_continuous
            object.
        other_distribution : Union[Tuple, rv_continuous]
            The distribution of the non highly rewarding states. It can be
            either passed as a tuple containing Beta parameters or as a
            rv_continuous object.
        """
        if type(optimal_distribution) == tuple:
            optimal_distribution = get_dist(
                optimal_distribution[0], optimal_distribution[1:]
            )
        if type(other_distribution) == tuple:
            other_distribution = get_dist(other_distribution[0], other_distribution[1:])

        self.n_starting_states = n_starting_states
        self.size = size
        self.make_reward_stochastic = make_reward_stochastic

        dists = [optimal_distribution, other_distribution]
        if dists.count(None) == 0:
            self.optimal_distribution = optimal_distribution
            self.other_distribution = other_distribution
        else:
            if make_reward_stochastic:
                self.other_distribution = beta(1, size ** 2 - 1)
                self.optimal_distribution = beta(size ** 2 - 1, 1)
            else:
                self.optimal_distribution = deterministic(1.0)
                self.other_distribution = deterministic(0.0)

        assert self.size >= 3
        check_distributions(
            [
                self.optimal_distribution,
                self.other_distribution,
            ],
            self.make_reward_stochastic,
        )

        super().__init__(
            seed=seed,
            randomize_actions=randomize_actions,
            lazy=lazy,
            **kwargs,
        )

    @property
    def parameters(self) -> Dict[str, Any]:
        return {
            **super(MiniGridDoorKeyMDP, self).parameters,
            **dict(
                size=self.size,
                n_starting_states=self.n_starting_states,
                optimal_distribution=self.optimal_distribution,
                other_distribution=self.other_distribution,
            ),
        }

    @property
    def possible_starting_nodes(self) -> List[MiniGridDoorKeyNode]:
        return self._possible_starting_nodes

    @cached_property
    def coordinates_available(self):
        coords = (
            MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0))
            .ravel()
            .tolist()
        )
        for i in range(self.size):
            if self.is_wall_horizontal:
                coords.remove((i, self.wall_position))
            else:
                coords.remove((self.wall_position, i))
        return tuple(coords)

    @property
    def num_actions(self):
        return len(MiniGridDoorKeyAction)

    def _calculate_next_nodes_prms(
        self, node: MiniGridDoorKeyNode, action: int
    ) -> Tuple[Tuple[dict, float], ...]:
        newnode_prms = deepcopy(asdict(node))
        if action == MiniGridDoorKeyAction.TurnRight:
            newnode_prms["Dir"] = (node.Dir + 1) % 4
        if action == MiniGridDoorKeyAction.TurnLeft:
            newnode_prms["Dir"] = (node.Dir - 1) % 4
        if action == MiniGridDoorKeyAction.MoveForward:
            if node.Dir == MiniGridDoorKeyDirection.UP:
                next_coord = (node.X, node.Y + 1)
            if node.Dir == MiniGridDoorKeyDirection.RIGHT:
                next_coord = node.X + 1, node.Y
            if node.Dir == MiniGridDoorKeyDirection.DOWN:
                next_coord = node.X, node.Y - 1
            if node.Dir == MiniGridDoorKeyDirection.LEFT:
                next_coord = node.X - 1, node.Y
            if next_coord in self.coordinates_available or (
                node.IsDoorOpened and next_coord == self.door_position
            ):
                newnode_prms["X"], newnode_prms["Y"] = next_coord
        if action == MiniGridDoorKeyAction.PickObject:
            if node.X == node.XKey and node.Y == node.YKey:
                newnode_prms["XKey"] = newnode_prms["YKey"] = -1
        if node.XKey == -1 and not node.IsDoorOpened:
            if action == MiniGridDoorKeyAction.DropObject:
                newnode_prms["XKey"] = node.X
                newnode_prms["YKey"] = node.Y
            if action == MiniGridDoorKeyAction.UseObject:
                if node.Dir == MiniGridDoorKeyDirection.UP:
                    next_coord = (node.X, node.Y + 1)
                if node.Dir == MiniGridDoorKeyDirection.RIGHT:
                    next_coord = node.X + 1, node.Y
                if node.Dir == MiniGridDoorKeyDirection.DOWN:
                    next_coord = node.X, node.Y - 1
                if node.Dir == MiniGridDoorKeyDirection.LEFT:
                    next_coord = node.X - 1, node.Y
                if next_coord == self.door_position:
                    newnode_prms["IsDoorOpened"] = True
        return ((newnode_prms, 1.0),)

    def _calculate_reward_distribution(
        self, node: Any, action: IntEnum, next_node: Any
    ) -> rv_continuous:
        # Goal-position check completed from context; only the start of the
        # condition survives in the fragment.
        return (
            self.optimal_distribution
            if (next_node.X, next_node.Y) == self.goal_position
            else self.other_distribution
        )

    def _instantiate_starting_node_sampler(self) -> NextStateSampler:
        # noinspection PyAttributeOutsideInit
        self.wall_position = self._rng.randint(self.size - 2) + 1
        # noinspection PyAttributeOutsideInit
        self.is_wall_horizontal = self._rng.rand() > 0.5
        if self.is_wall_horizontal:
            self.door_position = self._rng.randint(self.size), self.wall_position
        else:
            self.door_position = self.wall_position, self._rng.randint(self.size)
        self.is_goal_before = self._rng.rand() > 0.5

        coords = MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0))
        goal_positions = []
        starting_positions = []
        for i, j in coords.ravel():
            if (
                i < self.wall_position if self.is_goal_before else i > self.wall_position
            ):
                goal_positions.append((j, i) if self.is_wall_horizontal else (i, j))
            elif (
                i > self.wall_position if self.is_goal_before else i < self.wall_position
            ):
                starting_positions.append((j, i) if self.is_wall_horizontal else (i, j))
        possible_starting_positions = deepcopy(starting_positions)
        self._rng.shuffle(goal_positions)
        self.goal_position = goal_positions[0]
        self._rng.shuffle(starting_positions)
        self.start_key_position = starting_positions.pop(0)
        starting_positions = [
            (x, y, dir)
            for x, y in starting_positions
            for dir in MiniGridDoorKeyDirection
        ]
        assert self.n_starting_states < len(starting_positions)
        self._possible_starting_nodes = [
            MiniGridDoorKeyNode(
                x,
                y,
                dir.value,
                *self.start_key_position,
                False,
            )
            for x, y, dir in starting_positions
        ]
        return NextStateSampler(
            next_states=self._possible_starting_nodes[: self.n_starting_states],
            probs=[1 / self.n_starting_states for _ in range(self.n_starting_states)],
            seed=self._next_seed(),
        )

    def calc_grid_repr(self, node: Any) -> np.array:
        grid_size = self.size
        door_position = self.door_position
        wall_position = self.wall_position
        is_wall_horizontal = self.is_wall_horizontal
        grid = np.zeros((grid_size, grid_size), dtype=str)
        grid[:, :] = " "
        grid[self.goal_position[1], self.goal_position[0]] = "G"
        if self.cur_node.XKey != -1:
            grid[self.cur_node.YKey, self.cur_node.XKey] = "K"
        for i in range(grid_size):
            if not is_wall_horizontal:
                grid[i, wall_position] = "W_en"
            else:
                grid[wall_position, i] = "W_en"
        # The closed-door glyph is inferred; only the "O" branch survives in
        # the fragment.
        grid[door_position[1], door_position[0]] = (
            "O" if self.cur_node.IsDoorOpened else "D"
        )
        if self.cur_node.Dir == MiniGridDoorKeyDirection.UP:
            grid[self.cur_node.Y, self.cur_node.X] = "^"
        elif self.cur_node.Dir == MiniGridDoorKeyDirection.RIGHT:
            grid[self.cur_node.Y, self.cur_node.X] = ">"
        elif self.cur_node.Dir == MiniGridDoorKeyDirection.DOWN:
            grid[self.cur_node.Y, self.cur_node.X] = "v"
        elif self.cur_node.Dir == MiniGridDoorKeyDirection.LEFT:
            grid[self.cur_node.Y, self.cur_node.X] = "<"
        return grid[::-1, :]
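# With make_reward_stochastic=True and no explicit distributions, the MDP falls
# back to Beta(size**2 - 1, 1) for the goal state and Beta(1, size**2 - 1) for
# every other state, concentrating nearly all reward mass on the goal. A
# minimal, self-contained sketch of that default (plain scipy, no colosseum
# import needed; size = 5 is an illustrative value, not a library default):
if __name__ == "__main__":
    from scipy.stats import beta as _beta

    size = 5
    optimal = _beta(size ** 2 - 1, 1)  # highly rewarding (goal) state
    other = _beta(1, size ** 2 - 1)    # every other state
    # Means: 24/25 = 0.96 for the goal, 1/25 = 0.04 elsewhere.
    print(optimal.mean(), other.mean())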
<filename>tests/bugs/core_6266_test.py
#coding:utf-8
#
# id:           bugs.core_6266
# title:        Deleting records from MON$ATTACHMENTS using ORDER BY clause doesn't close the corresponding attachments
# decription:
#                   Old title: Don't close attach while deleting record from MON$ATTACHMENTS using ORDER BY clause.
#                   Confirmed bug on 3.0.6.33271.
#                   Checked on 3.0.6.33272 (SS/CS) - works fine.
#                   22.04.2020. Checked separately on 4.0.0.1931 SS/CS: all OK. FB 4.0 can also be tested since this build.
#
# tracker_id:   CORE-6266
# min_versions: ['3.0.0']
# versions:     3.0
# qmid:         None

import pytest
from firebird.qa import db_factory, isql_act, Action

# version: 3.0
# resources: None

substitutions_1 = []

init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# test_script_1
#---
# import os
# import sys
# import time
# import fdb
#
# ATT_CNT=5
# ATT_DELAY=1
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = <PASSWORD>
#
# db_conn.close()
#
# con_list={}
# for i in range(0, ATT_CNT):
#     if i > 0:
#         time.sleep( ATT_DELAY )
#
#     c = fdb.connect(dsn = dsn)
#     a = c.attachment_id
#     con_list[ i ] = (a, c)
#     # print('created attachment ', (a,c) )
#
# con_admin = con_list[0][1]
#
# #print(con_admin.firebird_version)
#
# # this removes ALL connections --> should NOT be used for reproducing ticket issue:
# #con_admin.execute_immediate('delete from mon$attachments where mon$attachment_id != current_connection order by mon$timestamp')
#
# # This DOES NOT remove all attachments (only 'last' in order of timestamp), but
# # DELETE statement must NOT contain phrase 'mon$attachment_id != current_connection':
# con_admin.execute_immediate('delete from mon$attachments where mon$system_flag is distinct from 1 order by mon$timestamp')
#
# con_admin.commit()
#
# cur_admin = con_admin.cursor()
# cur_admin.execute('select mon$attachment_id,mon$user from mon$attachments where mon$system_flag is distinct from 1 and mon$attachment_id != current_connection' )
# i=0
# for r in cur_admin:
#     print( '### ACHTUNG ### STILL ALIVE ATTACHMENT DETECTED: ', r[0], r[1].strip(), '###' )
#     i += 1
# print('Number of attachments that remains alive: ',i)
#
# cur_admin.close()
#
# #print('Final cleanup before quit from Python.')
#
# for k,v in sorted( con_list.items() ):
#     #print('attempt to close attachment ', v[0] )
#     try:
#         v[1].close()
#         #print('done.')
#     except Exception as e:
#         pass
#         #print('Got exception:', sys.exc_info()[0])
#         #print(e[0])
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)

expected_stdout_1 = """
    Number of attachments that remains alive: 0
"""

@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
    pytest.fail("Test not IMPLEMENTED")
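# The commented reproduction above condenses into the callable sketch below.
# Assumptions are flagged inline: it uses the legacy fdb driver, as the
# original script does, and the DSN and credentials are placeholders rather
# than the harness-provided `dsn` and `user_name`. Run it only against a
# scratch database, since it drops other attachments.
def _reproduce_core_6266(dsn="localhost:/tmp/scratch.fdb",
                         user="SYSDBA", password="masterkey"):
    # Hypothetical helper, not part of the original test module.
    import fdb

    cons = [fdb.connect(dsn=dsn, user=user, password=password) for _ in range(5)]
    admin = cons[0]
    # Key detail from the ticket: the DELETE uses ORDER BY and must not
    # mention current_connection.
    admin.execute_immediate(
        "delete from mon$attachments where mon$system_flag is distinct from 1 "
        "order by mon$timestamp"
    )
    admin.commit()
    cur = admin.cursor()
    cur.execute(
        "select count(*) from mon$attachments "
        "where mon$system_flag is distinct from 1 "
        "and mon$attachment_id != current_connection"
    )
    # On a fixed server this prints 0, matching expected_stdout_1 above.
    print("Number of attachments that remains alive:", cur.fetchone()[0])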
# Senate crawler, reconstructed from the overlapping fragments above.

from bs4 import BeautifulSoup
import logging
import pandas as pd
import csv
import re
import requests
from urllib.parse import urljoin

logging.basicConfig(format="%(asctime)s %(levelname)s:%(message)s", level=logging.INFO)


def get_html(url):
    return requests.get(url).text


class SenateCrawler:
    def __init__(self):
        self.base_url = "https://www25.senado.leg.br/"
        self.search_url = self.base_url + "web/senadores/em-exercicio/-/e/por-nome"
        self.senate = []

    def get_senate(self, url):
        soup = BeautifulSoup(get_html(self.search_url), "html.parser")
        trs = soup.find("table").find("tbody").find_all("tr")
        for tr in trs:
            cells = tr.find_all("td")
            senateperson = {
                "name": cells[0].get_text(),
                "party": cells[1].get_text(),
                "email": cells[5].get_text(),
            }
            if senateperson["email"]:
                self.senate.append(senateperson)

    def run(self):
        try:
            self.get_senate(self.search_url)
        except Exception:
            logging.exception("global failure")
        finally:
            df = pd.DataFrame(self.senate)
            df.to_csv("senate.csv")
            logging.info("program exited")
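# Usage: the crawler is self-driving. Note the error handling in run(): a
# failure mid-scrape is logged, and the finally block still writes whatever
# rows were collected, so a partial senate.csv is produced rather than nothing.
# (get_senate ignores its url argument and always fetches self.search_url.)
if __name__ == "__main__":
    SenateCrawler().run()  # writes senate.csv to the working directory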
# Django autocomplete viewset tests, reconstructed from the overlapping
# fragments above.

from test_plus.test import TestCase

from ...administrative_units.factories import AdministrativeUnitFactory
from ...cases.factories import CaseFactory
from ...channels.factories import ChannelFactory
from ...events.factories import EventFactory
from ...features.factories import FeatureFactory, FeatureOptionFactory
from ...generic.tests.test_views import ReadOnlyViewSetMixin
from ...institutions.factories import InstitutionFactory
from ...letters.factories import DocumentTypeFactory, ReferenceNumberFactory
from ...search.tests.mixins import SearchQueryMixin
from ...tags.factories import TagFactory
from ...users.factories import UserFactory


class AdministrativeUnitAutocompleteViewSetTestCase(
    ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
    # basename not visible in the fragment; inferred from the naming pattern.
    basename = "autocomplete_administrative_unit"
    factory_class = AdministrativeUnitFactory

    def validate_item(self, item):
        self.assertEqual(item["id"], self.obj.id)
        self.assertEqual(item["name"], self.obj.name)


class CaseAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
    basename = "autocomplete_case"
    factory_class = CaseFactory

    def validate_item(self, item):
        self.assertEqual(item["id"], self.obj.id)
        self.assertEqual(item["name"], self.obj.name)


class ChannelAutocompleteViewSetTestCase(
    ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
    basename = "autocomplete_channel"
    factory_class = ChannelFactory

    def validate_item(self, item):
        self.assertEqual(item["id"], self.obj.id)
        self.assertEqual(item["name"], self.obj.name)


class DocumentTypeAutocompleteViewSetTestCase(
    ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
    basename = "autocomplete_document_type"
    factory_class = DocumentTypeFactory

    def validate_item(self, item):
        self.assertEqual(item["id"], self.obj.id)
        self.assertEqual(item["name"], self.obj.name)


class ReferenceNumberAutocompleteViewSetTestCase(
    ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
    basename = "autocomplete_reference_number"
    factory_class = ReferenceNumberFactory

    def validate_item(self, item):
        self.assertEqual(item["id"], self.obj.id)
        self.assertEqual(item["name"], self.obj.name)


class EventAutocompleteViewSetTestCase(
    ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
    basename = "autocomplete_event"
    factory_class = EventFactory

    def validate_item(self, item):
        self.assertEqual(item["id"], self.obj.id)
        self.assertEqual(item["name"], self.obj.name)


class FeatureAutocompleteViewSetTestCase(
    ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
    basename = "autocomplete_feature"
    factory_class = FeatureFactory

    def validate_item(self, item):
        self.assertEqual(item["id"], self.obj.id)
        self.assertEqual(item["name"], self.obj.name)


class FeatureOptionAutocompleteViewSetTestCase(
    ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
    basename = "autocomplete_feature_option"
    factory_class = FeatureOptionFactory

    def validate_item(self, item):
        self.assertEqual(item["id"], self.obj.id)
        self.assertEqual(item["name"], self.obj.name)


class InstitutionAutocompleteViewSetTestCase(
    ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
    basename = "autocomplete_institution"
    factory_class = InstitutionFactory

    def validate_item(self, item):
        self.assertEqual(item["id"], self.obj.id)
        self.assertEqual(item["name"], self.obj.name)


class TagAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
    basename = "autocomplete_tag"
    factory_class = TagFactory

    def validate_item(self, item):
        self.assertEqual(item["id"], self.obj.id)
        self.assertEqual(item["name"], self.obj.name)


class UserAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
    basename = "autocomplete_user"
    factory_class = UserFactory
    initial_count = 1

    def validate_item(self, item):
        self.assertEqual(item["id"], self.obj.id)
        self.assertEqual(item["username"], self.obj.username)
AdministrativeUnitFactory from ...cases.factories import", "self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class FeatureAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename", "= UserFactory initial_count = 1 def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id)", "): basename = \"autocomplete_event\" factory_class = EventFactory def validate_item(self, item):", "def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class FeatureOptionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin,", "\"autocomplete_user\" factory_class = UserFactory initial_count = 1 def validate_item(self, item):", "ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_feature\" factory_class = FeatureFactory", "TestCase): basename = \"autocomplete_case\" factory_class = CaseFactory def validate_item(self, item):", "import AdministrativeUnitFactory from ...cases.factories import CaseFactory from ...channels.factories import ChannelFactory", "ChannelFactory from ...events.factories import EventFactory from ...features.factories import FeatureFactory, FeatureOptionFactory", "\"autocomplete_tag\" factory_class = TagFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"],", "SearchQueryMixin, TestCase): basename = \"autocomplete_case\" factory_class = CaseFactory def validate_item(self,", "from ...search.tests.mixins import SearchQueryMixin from ...tags.factories import TagFactory from ...users.factories", "def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class UserAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin,", "SearchQueryMixin, TestCase ): basename = \"autocomplete_administrative_unit\" factory_class = AdministrativeUnitFactory def", "self.obj.name) class InstitutionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_institution\"", "\"autocomplete_reference_number\" factory_class = ReferenceNumberFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"],", "ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_feature_option\" factory_class = FeatureOptionFactory", "= EventFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class", "basename = \"autocomplete_reference_number\" factory_class = ReferenceNumberFactory def validate_item(self, item): self.assertEqual(item[\"id\"],", "FeatureOptionFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class InstitutionAutocompleteViewSetTestCase(", "class EventAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_event\" factory_class", "import UserFactory class AdministrativeUnitAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename =", "self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class UserAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase): basename = \"autocomplete_user\"", "import TestCase from ...administrative_units.factories import AdministrativeUnitFactory from ...cases.factories import CaseFactory", "self.assertEqual(item[\"id\"], 
self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class InstitutionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ):", "ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_administrative_unit\" factory_class = AdministrativeUnitFactory", "SearchQueryMixin, TestCase): basename = \"autocomplete_user\" factory_class = UserFactory initial_count =", "import InstitutionFactory from ...letters.factories import DocumentTypeFactory, ReferenceNumberFactory from ...search.tests.mixins import", "factory_class = UserFactory initial_count = 1 def validate_item(self, item): self.assertEqual(item[\"id\"],", "ReadOnlyViewSetMixin from ...institutions.factories import InstitutionFactory from ...letters.factories import DocumentTypeFactory, ReferenceNumberFactory", "item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class InstitutionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase", "= AdministrativeUnitFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class", "ChannelAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_channel\" factory_class =", "): basename = \"autocomplete_institution\" factory_class = InstitutionFactory def validate_item(self, item):", "basename = \"autocomplete_feature\" factory_class = FeatureFactory def validate_item(self, item): self.assertEqual(item[\"id\"],", "factory_class = FeatureOptionFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name)", "CaseFactory from ...channels.factories import ChannelFactory from ...events.factories import EventFactory from", "): basename = \"autocomplete_reference_number\" factory_class = ReferenceNumberFactory def validate_item(self, item):", "= DocumentTypeFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class", "self.assertEqual(item[\"name\"], self.obj.name) class FeatureOptionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename =", "ChannelFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class DocumentTypeAutocompleteViewSetTestCase(", "self.assertEqual(item[\"name\"], self.obj.name) class ChannelAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename =", "class DocumentTypeAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_document_type\" factory_class", "= \"autocomplete_feature_option\" factory_class = FeatureOptionFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id)", "def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class InstitutionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin,", "TagFactory from ...users.factories import UserFactory class AdministrativeUnitAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase", "from ...tags.factories import TagFactory from ...users.factories import UserFactory class AdministrativeUnitAutocompleteViewSetTestCase(", "self.assertEqual(item[\"name\"], self.obj.name) class CaseAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, 
TestCase): basename = \"autocomplete_case\" factory_class", "TestCase ): basename = \"autocomplete_event\" factory_class = EventFactory def validate_item(self,", "UserFactory class AdministrativeUnitAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_administrative_unit\"", "factory_class = InstitutionFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name)", "from ...users.factories import UserFactory class AdministrativeUnitAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ):", "class AdministrativeUnitAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_administrative_unit\" factory_class", "basename = \"autocomplete_institution\" factory_class = InstitutionFactory def validate_item(self, item): self.assertEqual(item[\"id\"],", "...cases.factories import CaseFactory from ...channels.factories import ChannelFactory from ...events.factories import", "FeatureAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_feature\" factory_class =", "TagAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase): basename = \"autocomplete_tag\" factory_class = TagFactory def", "= TagFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class", "ReferenceNumberFactory from ...search.tests.mixins import SearchQueryMixin from ...tags.factories import TagFactory from", "SearchQueryMixin, TestCase ): basename = \"autocomplete_event\" factory_class = EventFactory def", "= \"autocomplete_case\" factory_class = CaseFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id)", "SearchQueryMixin, TestCase): basename = \"autocomplete_tag\" factory_class = TagFactory def validate_item(self,", "= InstitutionFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class", "CaseAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase): basename = \"autocomplete_case\" factory_class = CaseFactory def", "from ...channels.factories import ChannelFactory from ...events.factories import EventFactory from ...features.factories", "EventFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class FeatureAutocompleteViewSetTestCase(", "...channels.factories import ChannelFactory from ...events.factories import EventFactory from ...features.factories import", "DocumentTypeFactory, ReferenceNumberFactory from ...search.tests.mixins import SearchQueryMixin from ...tags.factories import TagFactory", "SearchQueryMixin, TestCase ): basename = \"autocomplete_document_type\" factory_class = DocumentTypeFactory def", "= \"autocomplete_document_type\" factory_class = DocumentTypeFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id)", "= \"autocomplete_administrative_unit\" factory_class = AdministrativeUnitFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id)", "...search.tests.mixins import SearchQueryMixin from ...tags.factories import TagFactory from ...users.factories import", "SearchQueryMixin, TestCase ): basename = \"autocomplete_reference_number\" factory_class = ReferenceNumberFactory def", "TestCase ): basename = 
\"autocomplete_reference_number\" factory_class = ReferenceNumberFactory def validate_item(self,", "= \"autocomplete_tag\" factory_class = TagFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id)", "class UserAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase): basename = \"autocomplete_user\" factory_class = UserFactory", "...generic.tests.test_views import ReadOnlyViewSetMixin from ...institutions.factories import InstitutionFactory from ...letters.factories import", "= \"autocomplete_feature\" factory_class = FeatureFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id)", "DocumentTypeAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_document_type\" factory_class =", "factory_class = ReferenceNumberFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name)", "\"autocomplete_administrative_unit\" factory_class = AdministrativeUnitFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"],", "AdministrativeUnitFactory from ...cases.factories import CaseFactory from ...channels.factories import ChannelFactory from", "self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class FeatureOptionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename", "SearchQueryMixin, TestCase ): basename = \"autocomplete_feature_option\" factory_class = FeatureOptionFactory def", "validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class TagAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):", "self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class DocumentTypeAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename", "import EventFactory from ...features.factories import FeatureFactory, FeatureOptionFactory from ...generic.tests.test_views import", "TestCase ): basename = \"autocomplete_document_type\" factory_class = DocumentTypeFactory def validate_item(self,", "basename = \"autocomplete_feature_option\" factory_class = FeatureOptionFactory def validate_item(self, item): self.assertEqual(item[\"id\"],", "factory_class = EventFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name)", "self.obj.name) class CaseAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase): basename = \"autocomplete_case\" factory_class =", "validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class InstitutionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin,", "= FeatureOptionFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class", "self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class FeatureOptionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ):", "...events.factories import EventFactory from ...features.factories import FeatureFactory, FeatureOptionFactory from ...generic.tests.test_views", "basename = \"autocomplete_channel\" factory_class = ChannelFactory def validate_item(self, item): self.assertEqual(item[\"id\"],", "def validate_item(self, item): 
self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class CaseAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin,", "DocumentTypeFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class ReferenceNumberAutocompleteViewSetTestCase(", "...tags.factories import TagFactory from ...users.factories import UserFactory class AdministrativeUnitAutocompleteViewSetTestCase( ReadOnlyViewSetMixin,", "validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class UserAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):", "): basename = \"autocomplete_administrative_unit\" factory_class = AdministrativeUnitFactory def validate_item(self, item):", "= FeatureFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class", "InstitutionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_institution\" factory_class =", "self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class TagAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase): basename = \"autocomplete_tag\"", "def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class TagAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin,", "ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_reference_number\" factory_class = ReferenceNumberFactory", "TestCase ): basename = \"autocomplete_channel\" factory_class = ChannelFactory def validate_item(self,", "class FeatureOptionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_feature_option\" factory_class", "\"autocomplete_feature_option\" factory_class = FeatureOptionFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"],", "TestCase): basename = \"autocomplete_tag\" factory_class = TagFactory def validate_item(self, item):", "AdministrativeUnitAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_administrative_unit\" factory_class =", "self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class ChannelAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ):", "import SearchQueryMixin from ...tags.factories import TagFactory from ...users.factories import UserFactory", "self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class ReferenceNumberAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ):", "\"autocomplete_document_type\" factory_class = DocumentTypeFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"],", "self.obj.name) class FeatureOptionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_feature_option\"", "): basename = \"autocomplete_feature_option\" factory_class = FeatureOptionFactory def validate_item(self, item):", "item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class CaseAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase): 
basename", "self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class CaseAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase): basename =", "class ReferenceNumberAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_reference_number\" factory_class", "factory_class = CaseFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name)", "self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class FeatureAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ):", "import ReadOnlyViewSetMixin from ...institutions.factories import InstitutionFactory from ...letters.factories import DocumentTypeFactory,", "self.obj.name) class UserAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase): basename = \"autocomplete_user\" factory_class =", "validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class CaseAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):", "TestCase ): basename = \"autocomplete_administrative_unit\" factory_class = AdministrativeUnitFactory def validate_item(self,", "basename = \"autocomplete_administrative_unit\" factory_class = AdministrativeUnitFactory def validate_item(self, item): self.assertEqual(item[\"id\"],", "validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class ChannelAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin,", "ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_channel\" factory_class = ChannelFactory", "item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class FeatureOptionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase", "= \"autocomplete_channel\" factory_class = ChannelFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id)", "ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_event\" factory_class = EventFactory", "): basename = \"autocomplete_channel\" factory_class = ChannelFactory def validate_item(self, item):", "import DocumentTypeFactory, ReferenceNumberFactory from ...search.tests.mixins import SearchQueryMixin from ...tags.factories import", "factory_class = DocumentTypeFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name)", "\"autocomplete_case\" factory_class = CaseFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"],", "self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class ReferenceNumberAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename", "item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class FeatureAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase", "self.assertEqual(item[\"name\"], self.obj.name) class TagAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase): basename = \"autocomplete_tag\" factory_class", "item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class 
TagAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase): basename", "UserFactory initial_count = 1 def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"username\"],", "self.obj.name) class FeatureAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_feature\"", "SearchQueryMixin, TestCase ): basename = \"autocomplete_institution\" factory_class = InstitutionFactory def", "ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_document_type\" factory_class = DocumentTypeFactory", "self.assertEqual(item[\"name\"], self.obj.name) class InstitutionAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename =", "SearchQueryMixin, TestCase ): basename = \"autocomplete_feature\" factory_class = FeatureFactory def", "item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class UserAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase): basename", "self.assertEqual(item[\"name\"], self.obj.name) class EventAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename =", "TagFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class UserAutocompleteViewSetTestCase(ReadOnlyViewSetMixin,", "def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class ChannelAutocompleteViewSetTestCase( ReadOnlyViewSetMixin,", "self.obj.name) class ReferenceNumberAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_reference_number\"", "...administrative_units.factories import AdministrativeUnitFactory from ...cases.factories import CaseFactory from ...channels.factories import", "basename = \"autocomplete_user\" factory_class = UserFactory initial_count = 1 def", "EventFactory from ...features.factories import FeatureFactory, FeatureOptionFactory from ...generic.tests.test_views import ReadOnlyViewSetMixin", "self.obj.name) class DocumentTypeAutocompleteViewSetTestCase( ReadOnlyViewSetMixin, SearchQueryMixin, TestCase ): basename = \"autocomplete_document_type\"", "InstitutionFactory def validate_item(self, item): self.assertEqual(item[\"id\"], self.obj.id) self.assertEqual(item[\"name\"], self.obj.name) class TagAutocompleteViewSetTestCase(ReadOnlyViewSetMixin," ]
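The mixins carry the actual test logic here: each concrete case only declares `basename`, `factory_class`, and a `validate_item` hook. `ReadOnlyViewSetMixin` lives in `...generic.tests.test_views` and is not part of this file, so the following is only a minimal, hypothetical sketch of how such a mixin could consume those attributes, assuming DRF-style `{basename}-list` routing; the names, response envelope, and test body are illustrative assumptions, not the project's actual implementation.

from django.urls import reverse


class ReadOnlyViewSetMixinSketch:
    # Hypothetical stand-in for the real ReadOnlyViewSetMixin.
    basename = None       # route basename, e.g. "autocomplete_tag"
    factory_class = None  # factory that creates the object under test
    initial_count = 0     # pre-existing objects (e.g. the logged-in user)

    def setUp(self):
        super().setUp()
        self.obj = self.factory_class()

    def test_list_returns_created_object(self):
        response = self.client.get(reverse(f"{self.basename}-list"))
        items = response.json()["results"]  # assumes a DRF page-style envelope
        self.assertEqual(len(items), self.initial_count + 1)
        # Field-level checks are delegated to the concrete test case.
        self.validate_item(items[-1])

This explains the `initial_count = 1` override on the user test case: the authenticated user already exists, so the list holds one extra row before the factory object is created.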
[ "list(set(result)) @property def actions(self) -> List[IAction]: return self._actions @property def", "action.source_version return None def target_version(self, application: str) -> str: for", "self._state def source_version(self, application: str) -> str: for action in", "actions: List[IAction]) -> None: self._state = state self._actions = actions", "applications(self) -> List[str]: result = [] for action in self._actions:", "..actions.iaction import IAction from ..model.state import LennyBotState class LennyBotPlan: def", "def state(self) -> LennyBotState: return self._state def source_version(self, application: str)", "-> str: for action in self._actions: if action.application != application:", "= state self._actions = actions @property def applications(self) -> List[str]:", "!= application: continue return action.source_version return None def target_version(self, application:", "LennyBotPlan: def __init__(self, state: LennyBotState, actions: List[IAction]) -> None: self._state", "for action in self._actions: if action.application != application: continue return", "= actions @property def applications(self) -> List[str]: result = []", "class LennyBotPlan: def __init__(self, state: LennyBotState, actions: List[IAction]) -> None:", "if action.application != application: continue return action.source_version return None def", "Any, List from ..actions.iaction import IAction from ..model.state import LennyBotState", "typing import Any, List from ..actions.iaction import IAction from ..model.state", "self._actions: if action.application != application: continue return action.source_version return None", "result.append(action.application) return list(set(result)) @property def actions(self) -> List[IAction]: return self._actions", "None def target_version(self, application: str) -> str: for action in", "str: for action in self._actions: if action.application != application: continue", "action in self._actions: result.append(action.application) return list(set(result)) @property def actions(self) ->", "action in self._actions: if action.application != application: continue return action.source_version", "from ..actions.iaction import IAction from ..model.state import LennyBotState class LennyBotPlan:", "-> LennyBotState: return self._state def source_version(self, application: str) -> str:", "in self._actions: if action.application != application: continue return action.source_version return", "for action in self._actions: result.append(action.application) return list(set(result)) @property def actions(self)", "LennyBotState, actions: List[IAction]) -> None: self._state = state self._actions =", "str) -> str: for action in self._actions: if action.application !=", "<filename>src/lennybot/model/plan.py from typing import Any, List from ..actions.iaction import IAction", "List[str]: result = [] for action in self._actions: result.append(action.application) return", "target_version(self, application: str) -> str: for action in self._actions: if", "in self._actions: result.append(action.application) return list(set(result)) @property def actions(self) -> List[IAction]:", "self._actions: result.append(action.application) return list(set(result)) @property def actions(self) -> List[IAction]: return", "action.application != application: continue return action.source_version return None def target_version(self,", "return action.source_version return None def target_version(self, application: str) -> str:", "None: self._state = state self._actions = actions @property def applications(self)", "-> 
List[IAction]: return self._actions @property def state(self) -> LennyBotState: return", "state: LennyBotState, actions: List[IAction]) -> None: self._state = state self._actions", "self._actions @property def state(self) -> LennyBotState: return self._state def source_version(self,", "actions @property def applications(self) -> List[str]: result = [] for", "import LennyBotState class LennyBotPlan: def __init__(self, state: LennyBotState, actions: List[IAction])", "@property def applications(self) -> List[str]: result = [] for action", "import IAction from ..model.state import LennyBotState class LennyBotPlan: def __init__(self,", "def __init__(self, state: LennyBotState, actions: List[IAction]) -> None: self._state =", "def target_version(self, application: str) -> str: for action in self._actions:", "List[IAction]) -> None: self._state = state self._actions = actions @property", "action in self._actions: if action.application != application: continue return action.target_version", "@property def actions(self) -> List[IAction]: return self._actions @property def state(self)", "self._state = state self._actions = actions @property def applications(self) ->", "LennyBotState: return self._state def source_version(self, application: str) -> str: for", "@property def state(self) -> LennyBotState: return self._state def source_version(self, application:", "__init__(self, state: LennyBotState, actions: List[IAction]) -> None: self._state = state", "List[IAction]: return self._actions @property def state(self) -> LennyBotState: return self._state", "= [] for action in self._actions: result.append(action.application) return list(set(result)) @property", "application: continue return action.source_version return None def target_version(self, application: str)", "return self._state def source_version(self, application: str) -> str: for action", "application: str) -> str: for action in self._actions: if action.application", "from ..model.state import LennyBotState class LennyBotPlan: def __init__(self, state: LennyBotState,", "self._actions = actions @property def applications(self) -> List[str]: result =", "List from ..actions.iaction import IAction from ..model.state import LennyBotState class", "state(self) -> LennyBotState: return self._state def source_version(self, application: str) ->", "actions(self) -> List[IAction]: return self._actions @property def state(self) -> LennyBotState:", "continue return action.source_version return None def target_version(self, application: str) ->", "in self._actions: if action.application != application: continue return action.target_version return", "self._actions: if action.application != application: continue return action.target_version return None", "-> None: self._state = state self._actions = actions @property def", "def applications(self) -> List[str]: result = [] for action in", "LennyBotState class LennyBotPlan: def __init__(self, state: LennyBotState, actions: List[IAction]) ->", "return list(set(result)) @property def actions(self) -> List[IAction]: return self._actions @property", "result = [] for action in self._actions: result.append(action.application) return list(set(result))", "import Any, List from ..actions.iaction import IAction from ..model.state import", "-> List[str]: result = [] for action in self._actions: result.append(action.application)", "..model.state import LennyBotState class LennyBotPlan: def __init__(self, state: LennyBotState, actions:", "source_version(self, application: str) -> str: for action in self._actions: if", "from 
typing import Any, List from ..actions.iaction import IAction from", "return None def target_version(self, application: str) -> str: for action", "return self._actions @property def state(self) -> LennyBotState: return self._state def", "def source_version(self, application: str) -> str: for action in self._actions:", "def actions(self) -> List[IAction]: return self._actions @property def state(self) ->", "state self._actions = actions @property def applications(self) -> List[str]: result", "IAction from ..model.state import LennyBotState class LennyBotPlan: def __init__(self, state:", "[] for action in self._actions: result.append(action.application) return list(set(result)) @property def" ]
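A plan is just state plus a list of actions, with convenience lookups keyed by application name. A quick usage sketch, using a stub in place of a real `IAction` implementation; the stub class, the version strings, and passing `state=None` are all illustrative shortcuts, not lennybot's real wiring.

class _StubAction:
    # Minimal stand-in exposing the attributes LennyBotPlan reads.
    def __init__(self, application, source_version, target_version):
        self.application = application
        self.source_version = source_version
        self.target_version = target_version


plan = LennyBotPlan(state=None, actions=[_StubAction("grafana", "9.0.0", "9.1.2")])
print(plan.applications)               # ['grafana']
print(plan.source_version("grafana"))  # '9.0.0'
print(plan.target_version("grafana"))  # '9.1.2'
print(plan.target_version("other"))    # None (no action targets it)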
[ "is not None: tmp_data[\"enabled\"] = int(bool(enabled)) if data: tmp_data[\"data\"] =", "return response.json() def update(self, guid, name=None, type=None, enabled=None, data=None, org=False):", "request URI api_uri = f\"/api/v2/ContainerRegistries/{guid}\" response = self._session.delete(api_uri, org=org) if", "response = self._session.patch(api_uri, org=org, data=tmp_data) return response.json() def delete(self, guid,", "= \"/api/v2/ContainerRegistries/search\" response = self._session.post(api_uri, data=query_data, org=org) return response.json() def", "= type if enabled is not None: tmp_data[\"enabled\"] = int(bool(enabled))", "org=org) return response.json() def get_by_type(self, type, org=False): \"\"\" A method", "boolean representing whether the request should be performed at the", "self._session.post(api_uri, data=query_data, org=org) return response.json() def update(self, guid, name=None, type=None,", "level :return response json \"\"\" logger.info(\"Creating container registry in Lacework...\")", "schema for the specified type. :param org: A boolean representing", "data response = self._session.patch(api_uri, org=org, data=tmp_data) return response.json() def delete(self,", "A method to search container registries. :param query_data: A dictionary", ":return response json \"\"\" logger.info(\"Updating container registry in Lacework...\") #", "tmp_data[\"type\"] = type if enabled is not None: tmp_data[\"enabled\"] =", "object. :param session: An instance of the HttpSession class :return", "registry type. :param enabled: A boolean/integer representing whether the container", "self._session.patch(api_uri, org=org, data=tmp_data) return response.json() def delete(self, guid, org=False): \"\"\"", "Registries request URI api_uri = f\"/api/v2/ContainerRegistries/{guid}\" tmp_data = {} if", "performed at the Organization level :return response json \"\"\" logger.info(\"Deleting", "A string representing the container registry name. :param type: A", "GUID. :param name: A string representing the container registry name.", "type: A string representing the container registry type. :param enabled:", "container registry GUID. :param name: A string representing the container", "{ \"name\": name, \"type\": type, \"enabled\": int(bool(enabled)), \"data\": data }", "org=org) def search(self, query_data=None, org=False): \"\"\" A method to search", "response = self._session.post(api_uri, data=query_data, org=org) return response.json() def update(self, guid,", "logging logger = logging.getLogger(__name__) class ContainerRegistriesAPI(object): \"\"\" Lacework Container Registries", "if data: tmp_data[\"data\"] = data response = self._session.patch(api_uri, org=org, data=tmp_data)", "\"\"\" logger.info(\"Searching container registries from Lacework...\") # Build the Container", "registry name. :param type: A string representing the container registry", "} response = self._session.post(api_uri, org=org, data=data) return response.json() def get(self,", "get all container registries. :param guid: A string representing the", "class :return ContainerRegistriesAPI object. 
\"\"\" super(ContainerRegistriesAPI, self).__init__() self._session = session", "or 1) :param data: A JSON object matching the schema", "Organization level :return response json \"\"\" return self.get(type=type, org=org) def", "get_by_type(self, type, org=False): \"\"\" A method to get all container", "return response.json() def get(self, guid=None, type=None, org=False): \"\"\" A method", "URI api_uri = \"/api/v2/ContainerRegistries\" data = { \"name\": name, \"type\":", "container registry in Lacework...\") # Build the Container Registries request", "search parameters. (filters, returns) :return response json \"\"\" logger.info(\"Searching container", "\"\"\" Lacework Container Registries API wrapper. \"\"\" import logging logger", "string representing the container registry name. :param type: A string", ":param data: A JSON object matching the schema for the", "the Organization level :return response json \"\"\" logger.info(\"Deleting container registry", "performed at the Organization level :return response json \"\"\" return", "\"/api/v2/ContainerRegistries/search\" response = self._session.post(api_uri, data=query_data, org=org) return response.json() def update(self,", "Registries API wrapper. \"\"\" import logging logger = logging.getLogger(__name__) class", "request URI api_uri = \"/api/v2/ContainerRegistries/search\" response = self._session.post(api_uri, data=query_data, org=org)", "enabled, data, org=False): \"\"\" A method to create a new", "response = self._session.post(api_uri, org=org, data=data) return response.json() def get(self, guid=None,", "to update an container registry. :param guid: A string representing", "Build the Container Registries request URI if guid: api_uri =", "representing the container registry GUID. :param type: A string representing", "tmp_data = {} if name: tmp_data[\"name\"] = name if type:", "not None: tmp_data[\"enabled\"] = int(bool(enabled)) if data: tmp_data[\"data\"] = data", "registry info from Lacework...\") # Build the Container Registries request", "the Container Registries request URI api_uri = \"/api/v2/ContainerRegistries/search\" response =", "whether the container registry is enabled. (0 or 1) :param", "int(bool(enabled)), \"data\": data } response = self._session.post(api_uri, org=org, data=data) return", "JSON object matching the schema for the specified type. :param", "def create(self, name, type, enabled, data, org=False): \"\"\" A method", "request URI api_uri = f\"/api/v2/ContainerRegistries/{guid}\" tmp_data = {} if name:", "Registries request URI api_uri = f\"/api/v2/ContainerRegistries/{guid}\" response = self._session.delete(api_uri, org=org)", "org=org) def get_by_guid(self, guid, org=False): \"\"\" A method to get", "type. :param type: A string representing the container registry type.", "org=False): \"\"\" A method to get all container registries. :param", "Organization level :return response json \"\"\" logger.info(\"Getting container registry info", "level :return response json \"\"\" logger.info(\"Updating container registry in Lacework...\")", "\"/api/v2/ContainerRegistries\" response = self._session.get(api_uri, org=org) return response.json() def get_by_type(self, type,", "the request should be performed at the Organization level :return", "# -*- coding: utf-8 -*- \"\"\" Lacework Container Registries API", "representing the container registry GUID. 
:param org: A boolean representing", "data=query_data, org=org) return response.json() def update(self, guid, name=None, type=None, enabled=None,", "An instance of the HttpSession class :return ContainerRegistriesAPI object. \"\"\"", "container registries from Lacework...\") # Build the Container Registries request", "type: api_uri = f\"/api/v2/ContainerRegistries/{type}\" else: api_uri = \"/api/v2/ContainerRegistries\" response =", "get_by_guid(self, guid, org=False): \"\"\" A method to get all container", "= self._session.get(api_uri, org=org) return response.json() def get_by_type(self, type, org=False): \"\"\"", ":return response json \"\"\" logger.info(\"Searching container registries from Lacework...\") #", "in Lacework...\") # Build the Container Registries request URI api_uri", "get all container registries by type. :param type: A string", "response json \"\"\" logger.info(\"Deleting container registry in Lacework...\") # Build", ":param org: A boolean representing whether the request should be", "performed at the Organization level :return response json \"\"\" logger.info(\"Getting", "def delete(self, guid, org=False): \"\"\" A method to delete an", "org=org) if response.status_code == 204: return response else: return response.json()", "returns) :return response json \"\"\" logger.info(\"Searching container registries from Lacework...\")", "parameters. (filters, returns) :return response json \"\"\" logger.info(\"Searching container registries", "A string representing the container registry GUID. :param name: A", "method to search container registries. :param query_data: A dictionary containing", "Organization level :return response json \"\"\" return self.get(guid=guid, org=org) def", "1) :param data: A JSON object matching the schema for", "A string representing the container registry type. :param org: A", "\"\"\" A method to create a new container registry. :param", "data=data) return response.json() def get(self, guid=None, type=None, org=False): \"\"\" A", "Container Registries request URI api_uri = f\"/api/v2/ContainerRegistries/{guid}\" tmp_data = {}", "= name if type: tmp_data[\"type\"] = type if enabled is", "tmp_data[\"enabled\"] = int(bool(enabled)) if data: tmp_data[\"data\"] = data response =", "container registries. :param query_data: A dictionary containing the desired search", "\"\"\" import logging logger = logging.getLogger(__name__) class ContainerRegistriesAPI(object): \"\"\" Lacework", "name, \"type\": type, \"enabled\": int(bool(enabled)), \"data\": data } response =", "instance of the HttpSession class :return ContainerRegistriesAPI object. \"\"\" super(ContainerRegistriesAPI,", ":param enabled: A boolean/integer representing whether the container registry is", "def update(self, guid, name=None, type=None, enabled=None, data=None, org=False): \"\"\" A", "Lacework Container Registries API wrapper. \"\"\" import logging logger =", "container registry. :param name: A string representing the container registry", "from Lacework...\") # Build the Container Registries request URI api_uri", "object matching the schema for the specified type. :param org:", "container registry GUID. :param type: A string representing the container", "registries by type. :param type: A string representing the container", "self._session.delete(api_uri, org=org) if response.status_code == 204: return response else: return", "\"\"\" logger.info(\"Updating container registry in Lacework...\") # Build the Container", "the container registry is enabled. 
(0 or 1) :param data:", "the HttpSession class :return ContainerRegistriesAPI object. \"\"\" super(ContainerRegistriesAPI, self).__init__() self._session", "ContainerRegistriesAPI object. :param session: An instance of the HttpSession class", ":param type: A string representing the container registry type. :param", "type. :param enabled: A boolean/integer representing whether the container registry", "Registries request URI api_uri = \"/api/v2/ContainerRegistries\" data = { \"name\":", "the Organization level :return response json \"\"\" logger.info(\"Getting container registry", "\"name\": name, \"type\": type, \"enabled\": int(bool(enabled)), \"data\": data } response", "= self._session.post(api_uri, data=query_data, org=org) return response.json() def update(self, guid, name=None,", "Container Registries request URI api_uri = f\"/api/v2/ContainerRegistries/{guid}\" response = self._session.delete(api_uri,", "string representing the container registry GUID. :param type: A string", "tmp_data[\"data\"] = data response = self._session.patch(api_uri, org=org, data=tmp_data) return response.json()", "def search(self, query_data=None, org=False): \"\"\" A method to search container", "A boolean representing whether the request should be performed at", "logger = logging.getLogger(__name__) class ContainerRegistriesAPI(object): \"\"\" Lacework Container Registries API.", "org=False): \"\"\" A method to update an container registry. :param", "method to delete an container registry. :param guid: A string", "self.get(guid=guid, org=org) def search(self, query_data=None, org=False): \"\"\" A method to", "for the specified type. :param org: A boolean representing whether", "# Build the Container Registries request URI api_uri = \"/api/v2/ContainerRegistries/search\"", "A JSON object matching the schema for the specified type.", "enabled: A boolean/integer representing whether the container registry is enabled.", "Initializes the ContainerRegistriesAPI object. :param session: An instance of the", "response.json() def get(self, guid=None, type=None, org=False): \"\"\" A method to", "A string representing the container registry GUID. :param org: A", "data=tmp_data) return response.json() def delete(self, guid, org=False): \"\"\" A method", "method to get all container registries by type. :param type:", "registry is enabled. (0 or 1) :param data: A JSON", "matching the schema for the specified type. :param org: A", "f\"/api/v2/ContainerRegistries/{guid}\" response = self._session.delete(api_uri, org=org) if response.status_code == 204: return", "None: tmp_data[\"enabled\"] = int(bool(enabled)) if data: tmp_data[\"data\"] = data response", "representing the container registry GUID. :param name: A string representing", "Container Registries API wrapper. \"\"\" import logging logger = logging.getLogger(__name__)", "= self._session.patch(api_uri, org=org, data=tmp_data) return response.json() def delete(self, guid, org=False):", "registry type. :param org: A boolean representing whether the request", "all container registries. :param guid: A string representing the container", "org=False): \"\"\" A method to search container registries. :param query_data:", "representing the container registry type. 
:param org: A boolean representing", "request URI if guid: api_uri = f\"/api/v2/ContainerRegistries/{guid}\" elif type: api_uri", "type=None, enabled=None, data=None, org=False): \"\"\" A method to update an", "at the Organization level :return response json \"\"\" logger.info(\"Deleting container", "self._session = session def create(self, name, type, enabled, data, org=False):", "self.get(type=type, org=org) def get_by_guid(self, guid, org=False): \"\"\" A method to", "registries. :param query_data: A dictionary containing the desired search parameters.", "= \"/api/v2/ContainerRegistries\" response = self._session.get(api_uri, org=org) return response.json() def get_by_type(self,", "performed at the Organization level :return response json \"\"\" logger.info(\"Creating", "string representing the container registry GUID. :param org: A boolean", "f\"/api/v2/ContainerRegistries/{guid}\" tmp_data = {} if name: tmp_data[\"name\"] = name if", "search(self, query_data=None, org=False): \"\"\" A method to search container registries.", "\"\"\" return self.get(guid=guid, org=org) def search(self, query_data=None, org=False): \"\"\" A", "\"\"\" logger.info(\"Creating container registry in Lacework...\") # Build the Container", "desired search parameters. (filters, returns) :return response json \"\"\" logger.info(\"Searching", "the Organization level :return response json \"\"\" return self.get(type=type, org=org)", "= data response = self._session.patch(api_uri, org=org, data=tmp_data) return response.json() def", "container registries. :param guid: A string representing the container registry", "Build the Container Registries request URI api_uri = f\"/api/v2/ContainerRegistries/{guid}\" tmp_data", "elif type: api_uri = f\"/api/v2/ContainerRegistries/{type}\" else: api_uri = \"/api/v2/ContainerRegistries\" response", "type, enabled, data, org=False): \"\"\" A method to create a", "boolean/integer representing whether the container registry is enabled. (0 or", "method to get all container registries. :param guid: A string", "registry GUID. :param name: A string representing the container registry", ":param session: An instance of the HttpSession class :return ContainerRegistriesAPI", "api_uri = \"/api/v2/ContainerRegistries/search\" response = self._session.post(api_uri, data=query_data, org=org) return response.json()", "json \"\"\" logger.info(\"Getting container registry info from Lacework...\") # Build", "registry GUID. :param org: A boolean representing whether the request", "guid: A string representing the container registry GUID. :param type:", "the ContainerRegistriesAPI object. :param session: An instance of the HttpSession", "response.json() def delete(self, guid, org=False): \"\"\" A method to delete", "session): \"\"\" Initializes the ContainerRegistriesAPI object. :param session: An instance", "return response.json() def get_by_type(self, type, org=False): \"\"\" A method to", "representing the container registry name. :param type: A string representing", "HttpSession class :return ContainerRegistriesAPI object. \"\"\" super(ContainerRegistriesAPI, self).__init__() self._session =", "\"\"\" logger.info(\"Deleting container registry in Lacework...\") # Build the Container", "at the Organization level :return response json \"\"\" return self.get(guid=guid,", "api_uri = f\"/api/v2/ContainerRegistries/{guid}\" elif type: api_uri = f\"/api/v2/ContainerRegistries/{type}\" else: api_uri", "GUID. 
:param type: A string representing the container registry type.", "Lacework...\") # Build the Container Registries request URI if guid:", "self._session.post(api_uri, org=org, data=data) return response.json() def get(self, guid=None, type=None, org=False):", "A string representing the container registry GUID. :param type: A", "ContainerRegistriesAPI object. \"\"\" super(ContainerRegistriesAPI, self).__init__() self._session = session def create(self,", "should be performed at the Organization level :return response json", "to create a new container registry. :param name: A string", "logger.info(\"Getting container registry info from Lacework...\") # Build the Container", "the Organization level :return response json \"\"\" return self.get(guid=guid, org=org)", "name, type, enabled, data, org=False): \"\"\" A method to create", "guid=None, type=None, org=False): \"\"\" A method to get all container", "at the Organization level :return response json \"\"\" return self.get(type=type,", "def get(self, guid=None, type=None, org=False): \"\"\" A method to get", "to delete an container registry. :param guid: A string representing", "self._session.get(api_uri, org=org) return response.json() def get_by_type(self, type, org=False): \"\"\" A", "org=org, data=data) return response.json() def get(self, guid=None, type=None, org=False): \"\"\"", "api_uri = \"/api/v2/ContainerRegistries\" data = { \"name\": name, \"type\": type,", "request should be performed at the Organization level :return response", "f\"/api/v2/ContainerRegistries/{guid}\" elif type: api_uri = f\"/api/v2/ContainerRegistries/{type}\" else: api_uri = \"/api/v2/ContainerRegistries\"", "if name: tmp_data[\"name\"] = name if type: tmp_data[\"type\"] = type", "response json \"\"\" return self.get(type=type, org=org) def get_by_guid(self, guid, org=False):", "info from Lacework...\") # Build the Container Registries request URI", "object. \"\"\" super(ContainerRegistriesAPI, self).__init__() self._session = session def create(self, name,", ":param query_data: A dictionary containing the desired search parameters. (filters,", "type if enabled is not None: tmp_data[\"enabled\"] = int(bool(enabled)) if", "Build the Container Registries request URI api_uri = f\"/api/v2/ContainerRegistries/{guid}\" response", "type: tmp_data[\"type\"] = type if enabled is not None: tmp_data[\"enabled\"]", "type: A string representing the container registry type. :param org:", "A dictionary containing the desired search parameters. (filters, returns) :return", "\"\"\" super(ContainerRegistriesAPI, self).__init__() self._session = session def create(self, name, type,", "response json \"\"\" logger.info(\"Getting container registry info from Lacework...\") #", "response json \"\"\" logger.info(\"Updating container registry in Lacework...\") # Build", "string representing the container registry type. :param org: A boolean", "class ContainerRegistriesAPI(object): \"\"\" Lacework Container Registries API. \"\"\" def __init__(self,", "dictionary containing the desired search parameters. (filters, returns) :return response", "container registry GUID. :param org: A boolean representing whether the", "A string representing the container registry type. 
:param enabled: A", "\"\"\" return self.get(type=type, org=org) def get_by_guid(self, guid, org=False): \"\"\" A", "type, org=False): \"\"\" A method to get all container registries", "api_uri = f\"/api/v2/ContainerRegistries/{type}\" else: api_uri = \"/api/v2/ContainerRegistries\" response = self._session.get(api_uri,", "def get_by_type(self, type, org=False): \"\"\" A method to get all", "at the Organization level :return response json \"\"\" logger.info(\"Updating container", "container registry info from Lacework...\") # Build the Container Registries", "\"\"\" A method to get all container registries by type.", "api_uri = f\"/api/v2/ContainerRegistries/{guid}\" tmp_data = {} if name: tmp_data[\"name\"] =", "int(bool(enabled)) if data: tmp_data[\"data\"] = data response = self._session.patch(api_uri, org=org,", "string representing the container registry type. :param enabled: A boolean/integer", "the container registry name. :param type: A string representing the", "delete(self, guid, org=False): \"\"\" A method to delete an container", "method to create a new container registry. :param name: A", "coding: utf-8 -*- \"\"\" Lacework Container Registries API wrapper. \"\"\"", "(0 or 1) :param data: A JSON object matching the", "name=None, type=None, enabled=None, data=None, org=False): \"\"\" A method to update", "org=org, data=tmp_data) return response.json() def delete(self, guid, org=False): \"\"\" A", "\"\"\" A method to search container registries. :param query_data: A", "import logging logger = logging.getLogger(__name__) class ContainerRegistriesAPI(object): \"\"\" Lacework Container", "the container registry GUID. :param name: A string representing the", "utf-8 -*- \"\"\" Lacework Container Registries API wrapper. \"\"\" import", "level :return response json \"\"\" logger.info(\"Deleting container registry in Lacework...\")", "= \"/api/v2/ContainerRegistries\" data = { \"name\": name, \"type\": type, \"enabled\":", "# Build the Container Registries request URI api_uri = f\"/api/v2/ContainerRegistries/{guid}\"", "the desired search parameters. (filters, returns) :return response json \"\"\"", "# Build the Container Registries request URI if guid: api_uri", "guid, org=False): \"\"\" A method to delete an container registry.", "data: A JSON object matching the schema for the specified", "api_uri = \"/api/v2/ContainerRegistries\" response = self._session.get(api_uri, org=org) return response.json() def", "return response.json() def delete(self, guid, org=False): \"\"\" A method to", "whether the request should be performed at the Organization level", "= f\"/api/v2/ContainerRegistries/{type}\" else: api_uri = \"/api/v2/ContainerRegistries\" response = self._session.get(api_uri, org=org)", "json \"\"\" return self.get(guid=guid, org=org) def search(self, query_data=None, org=False): \"\"\"", "query_data=None, org=False): \"\"\" A method to search container registries. :param", "specified type. :param org: A boolean representing whether the request", "string representing the container registry GUID. :param name: A string", "A method to delete an container registry. :param guid: A", "A method to update an container registry. :param guid: A", "all container registries by type. :param type: A string representing", "an container registry. :param guid: A string representing the container", "Container Registries request URI api_uri = \"/api/v2/ContainerRegistries\" data = {", "container registry name. 
:param type: A string representing the container", "enabled=None, data=None, org=False): \"\"\" A method to update an container", "level :return response json \"\"\" return self.get(type=type, org=org) def get_by_guid(self,", "type, \"enabled\": int(bool(enabled)), \"data\": data } response = self._session.post(api_uri, org=org,", "get(self, guid=None, type=None, org=False): \"\"\" A method to get all", "guid: A string representing the container registry GUID. :param name:", "tmp_data[\"name\"] = name if type: tmp_data[\"type\"] = type if enabled", "by type. :param type: A string representing the container registry", "the Container Registries request URI api_uri = f\"/api/v2/ContainerRegistries/{guid}\" response =", "\"/api/v2/ContainerRegistries\" data = { \"name\": name, \"type\": type, \"enabled\": int(bool(enabled)),", "response.json() def update(self, guid, name=None, type=None, enabled=None, data=None, org=False): \"\"\"", ":return response json \"\"\" logger.info(\"Deleting container registry in Lacework...\") #", "response = self._session.get(api_uri, org=org) return response.json() def get_by_type(self, type, org=False):", "to get all container registries by type. :param type: A", "= f\"/api/v2/ContainerRegistries/{guid}\" elif type: api_uri = f\"/api/v2/ContainerRegistries/{type}\" else: api_uri =", "self).__init__() self._session = session def create(self, name, type, enabled, data,", "method to update an container registry. :param guid: A string", ":return ContainerRegistriesAPI object. \"\"\" super(ContainerRegistriesAPI, self).__init__() self._session = session def", "query_data: A dictionary containing the desired search parameters. (filters, returns)", "= {} if name: tmp_data[\"name\"] = name if type: tmp_data[\"type\"]", "f\"/api/v2/ContainerRegistries/{type}\" else: api_uri = \"/api/v2/ContainerRegistries\" response = self._session.get(api_uri, org=org) return", "registry. :param name: A string representing the container registry name.", "json \"\"\" logger.info(\"Updating container registry in Lacework...\") # Build the", "if type: tmp_data[\"type\"] = type if enabled is not None:", "the Organization level :return response json \"\"\" logger.info(\"Creating container registry", "API. \"\"\" def __init__(self, session): \"\"\" Initializes the ContainerRegistriesAPI object.", "a new container registry. :param name: A string representing the", "\"enabled\": int(bool(enabled)), \"data\": data } response = self._session.post(api_uri, org=org, data=data)", "the container registry type. :param org: A boolean representing whether", "A boolean/integer representing whether the container registry is enabled. (0", "__init__(self, session): \"\"\" Initializes the ContainerRegistriesAPI object. :param session: An", "\"\"\" Lacework Container Registries API. \"\"\" def __init__(self, session): \"\"\"", "container registry is enabled. (0 or 1) :param data: A", "URI api_uri = f\"/api/v2/ContainerRegistries/{guid}\" tmp_data = {} if name: tmp_data[\"name\"]", "Organization level :return response json \"\"\" logger.info(\"Deleting container registry in", "the schema for the specified type. :param org: A boolean", "(filters, returns) :return response json \"\"\" logger.info(\"Searching container registries from", "registry. 
:param guid: A string representing the container registry GUID.", "Organization level :return response json \"\"\" logger.info(\"Creating container registry in", "return self.get(type=type, org=org) def get_by_guid(self, guid, org=False): \"\"\" A method", "\"\"\" A method to get all container registries. :param guid:", "the container registry type. :param enabled: A boolean/integer representing whether", "the Container Registries request URI if guid: api_uri = f\"/api/v2/ContainerRegistries/{guid}\"", "= self._session.delete(api_uri, org=org) if response.status_code == 204: return response else:", "type. :param org: A boolean representing whether the request should", "delete an container registry. :param guid: A string representing the", "URI if guid: api_uri = f\"/api/v2/ContainerRegistries/{guid}\" elif type: api_uri =", "containing the desired search parameters. (filters, returns) :return response json", "= session def create(self, name, type, enabled, data, org=False): \"\"\"", "\"\"\" A method to update an container registry. :param guid:", "type=None, org=False): \"\"\" A method to get all container registries.", "session: An instance of the HttpSession class :return ContainerRegistriesAPI object.", "data=None, org=False): \"\"\" A method to update an container registry.", "= int(bool(enabled)) if data: tmp_data[\"data\"] = data response = self._session.patch(api_uri,", "Container Registries API. \"\"\" def __init__(self, session): \"\"\" Initializes the", "registries. :param guid: A string representing the container registry GUID.", ":return response json \"\"\" logger.info(\"Creating container registry in Lacework...\") #", "guid, name=None, type=None, enabled=None, data=None, org=False): \"\"\" A method to", "the Container Registries request URI api_uri = \"/api/v2/ContainerRegistries\" data =", ":return response json \"\"\" logger.info(\"Getting container registry info from Lacework...\")", "registries from Lacework...\") # Build the Container Registries request URI", "org: A boolean representing whether the request should be performed", "else: api_uri = \"/api/v2/ContainerRegistries\" response = self._session.get(api_uri, org=org) return response.json()", "update(self, guid, name=None, type=None, enabled=None, data=None, org=False): \"\"\" A method", ":param name: A string representing the container registry name. :param", "name: A string representing the container registry name. :param type:", "level :return response json \"\"\" return self.get(guid=guid, org=org) def search(self,", "registry GUID. :param type: A string representing the container registry", "search container registries. :param query_data: A dictionary containing the desired", "Organization level :return response json \"\"\" logger.info(\"Updating container registry in", "Lacework...\") # Build the Container Registries request URI api_uri =", "Container Registries request URI api_uri = \"/api/v2/ContainerRegistries/search\" response = self._session.post(api_uri,", "= { \"name\": name, \"type\": type, \"enabled\": int(bool(enabled)), \"data\": data", "representing whether the request should be performed at the Organization", "A method to get all container registries. 
:param guid: A", ":return response json \"\"\" return self.get(guid=guid, org=org) def search(self, query_data=None,", "Build the Container Registries request URI api_uri = \"/api/v2/ContainerRegistries/search\" response", "Build the Container Registries request URI api_uri = \"/api/v2/ContainerRegistries\" data", "the Organization level :return response json \"\"\" logger.info(\"Updating container registry", "the Container Registries request URI api_uri = f\"/api/v2/ContainerRegistries/{guid}\" tmp_data =", "logging.getLogger(__name__) class ContainerRegistriesAPI(object): \"\"\" Lacework Container Registries API. \"\"\" def", "guid, org=False): \"\"\" A method to get all container registries.", "\"\"\" Initializes the ContainerRegistriesAPI object. :param session: An instance of", "enabled. (0 or 1) :param data: A JSON object matching", "name: tmp_data[\"name\"] = name if type: tmp_data[\"type\"] = type if", "\"type\": type, \"enabled\": int(bool(enabled)), \"data\": data } response = self._session.post(api_uri,", "json \"\"\" return self.get(type=type, org=org) def get_by_guid(self, guid, org=False): \"\"\"", "= f\"/api/v2/ContainerRegistries/{guid}\" response = self._session.delete(api_uri, org=org) if response.status_code == 204:", "org=False): \"\"\" A method to delete an container registry. :param", "logger.info(\"Updating container registry in Lacework...\") # Build the Container Registries", "request URI api_uri = \"/api/v2/ContainerRegistries\" data = { \"name\": name,", "response = self._session.delete(api_uri, org=org) if response.status_code == 204: return response", "\"\"\" def __init__(self, session): \"\"\" Initializes the ContainerRegistriesAPI object. :param", ":param guid: A string representing the container registry GUID. :param", "Registries request URI if guid: api_uri = f\"/api/v2/ContainerRegistries/{guid}\" elif type:", "URI api_uri = f\"/api/v2/ContainerRegistries/{guid}\" response = self._session.delete(api_uri, org=org) if response.status_code", ":return response json \"\"\" return self.get(type=type, org=org) def get_by_guid(self, guid,", "the container registry GUID. :param org: A boolean representing whether", "container registry type. :param enabled: A boolean/integer representing whether the", "create a new container registry. :param name: A string representing", "response json \"\"\" return self.get(guid=guid, org=org) def search(self, query_data=None, org=False):", "API wrapper. \"\"\" import logging logger = logging.getLogger(__name__) class ContainerRegistriesAPI(object):", "super(ContainerRegistriesAPI, self).__init__() self._session = session def create(self, name, type, enabled,", "A method to create a new container registry. :param name:", "json \"\"\" logger.info(\"Searching container registries from Lacework...\") # Build the", "from Lacework...\") # Build the Container Registries request URI if", "def get_by_guid(self, guid, org=False): \"\"\" A method to get all", "Container Registries request URI if guid: api_uri = f\"/api/v2/ContainerRegistries/{guid}\" elif", "json \"\"\" logger.info(\"Deleting container registry in Lacework...\") # Build the", "response json \"\"\" logger.info(\"Searching container registries from Lacework...\") # Build", "update an container registry. :param guid: A string representing the", "-*- \"\"\" Lacework Container Registries API wrapper. \"\"\" import logging", "new container registry. :param name: A string representing the container", "representing the container registry type. 
:param enabled: A boolean/integer representing", "guid: A string representing the container registry GUID. :param org:", "at the Organization level :return response json \"\"\" logger.info(\"Creating container", "container registry type. :param org: A boolean representing whether the", "session def create(self, name, type, enabled, data, org=False): \"\"\" A", "at the Organization level :return response json \"\"\" logger.info(\"Getting container", "logger.info(\"Searching container registries from Lacework...\") # Build the Container Registries", "create(self, name, type, enabled, data, org=False): \"\"\" A method to", "if enabled is not None: tmp_data[\"enabled\"] = int(bool(enabled)) if data:", "= logging.getLogger(__name__) class ContainerRegistriesAPI(object): \"\"\" Lacework Container Registries API. \"\"\"", "= f\"/api/v2/ContainerRegistries/{guid}\" tmp_data = {} if name: tmp_data[\"name\"] = name", "logger.info(\"Deleting container registry in Lacework...\") # Build the Container Registries", "Registries API. \"\"\" def __init__(self, session): \"\"\" Initializes the ContainerRegistriesAPI", "{} if name: tmp_data[\"name\"] = name if type: tmp_data[\"type\"] =", "level :return response json \"\"\" logger.info(\"Getting container registry info from", "to get all container registries. :param guid: A string representing", "GUID. :param org: A boolean representing whether the request should", "be performed at the Organization level :return response json \"\"\"", "A method to get all container registries by type. :param", "-*- coding: utf-8 -*- \"\"\" Lacework Container Registries API wrapper.", "ContainerRegistriesAPI(object): \"\"\" Lacework Container Registries API. \"\"\" def __init__(self, session):", "is enabled. (0 or 1) :param data: A JSON object", "registry in Lacework...\") # Build the Container Registries request URI", "org=org) return response.json() def update(self, guid, name=None, type=None, enabled=None, data=None,", "URI api_uri = \"/api/v2/ContainerRegistries/search\" response = self._session.post(api_uri, data=query_data, org=org) return", "response.json() def get_by_type(self, type, org=False): \"\"\" A method to get", "org=False): \"\"\" A method to get all container registries by", "org=False): \"\"\" A method to create a new container registry.", "data, org=False): \"\"\" A method to create a new container", "the container registry GUID. :param type: A string representing the", "api_uri = f\"/api/v2/ContainerRegistries/{guid}\" response = self._session.delete(api_uri, org=org) if response.status_code ==", "container registries by type. :param type: A string representing the", "container registry. :param guid: A string representing the container registry", "if guid: api_uri = f\"/api/v2/ContainerRegistries/{guid}\" elif type: api_uri = f\"/api/v2/ContainerRegistries/{type}\"", "to search container registries. :param query_data: A dictionary containing the", "data } response = self._session.post(api_uri, org=org, data=data) return response.json() def", "guid: api_uri = f\"/api/v2/ContainerRegistries/{guid}\" elif type: api_uri = f\"/api/v2/ContainerRegistries/{type}\" else:", "name if type: tmp_data[\"type\"] = type if enabled is not", "data: tmp_data[\"data\"] = data response = self._session.patch(api_uri, org=org, data=tmp_data) return", "of the HttpSession class :return ContainerRegistriesAPI object. 
\"\"\" super(ContainerRegistriesAPI, self).__init__()", "= self._session.post(api_uri, org=org, data=data) return response.json() def get(self, guid=None, type=None,", "Lacework Container Registries API. \"\"\" def __init__(self, session): \"\"\" Initializes", "the specified type. :param org: A boolean representing whether the", "enabled is not None: tmp_data[\"enabled\"] = int(bool(enabled)) if data: tmp_data[\"data\"]", "\"\"\" A method to delete an container registry. :param guid:", "data = { \"name\": name, \"type\": type, \"enabled\": int(bool(enabled)), \"data\":", "logger.info(\"Creating container registry in Lacework...\") # Build the Container Registries", "representing whether the container registry is enabled. (0 or 1)", "wrapper. \"\"\" import logging logger = logging.getLogger(__name__) class ContainerRegistriesAPI(object): \"\"\"", "response json \"\"\" logger.info(\"Creating container registry in Lacework...\") # Build", "def __init__(self, session): \"\"\" Initializes the ContainerRegistriesAPI object. :param session:", "\"data\": data } response = self._session.post(api_uri, org=org, data=data) return response.json()", "# Build the Container Registries request URI api_uri = \"/api/v2/ContainerRegistries\"", "json \"\"\" logger.info(\"Creating container registry in Lacework...\") # Build the", "performed at the Organization level :return response json \"\"\" logger.info(\"Updating", "\"\"\" logger.info(\"Getting container registry info from Lacework...\") # Build the", "Registries request URI api_uri = \"/api/v2/ContainerRegistries/search\" response = self._session.post(api_uri, data=query_data,", "return self.get(guid=guid, org=org) def search(self, query_data=None, org=False): \"\"\" A method", "name. :param type: A string representing the container registry type." ]
[ "encoder outputs word_input = word_input.unsqueeze(0) # we are not using", "lr_scheduler = { 'scheduler': optim.lr_scheduler.OneCycleLR( optimizer, max_lr = self.learning_rate, steps_per_epoch", "optim.AdamW(self.parameters(), lr=self.learning_rate) lr_scheduler = { 'scheduler': optim.lr_scheduler.OneCycleLR( optimizer, max_lr =", "output = self.forward(src_seq, src_lengths,trg_seq) # do not know if this", "File to edit: nbs/01_seq2seq.ipynb (unless otherwise specified). __all__ = ['Encoder',", "{ 'scheduler': optim.lr_scheduler.OneCycleLR( optimizer, max_lr = self.learning_rate, steps_per_epoch = 3379,", "output_lengths = pad_packed_sequence(output_packed, batch_first=True) # output is irrelevant, context vector", "self.dec_hid_dim = hidden_dim self.enc_dropout = dropout self.dec_dropout = dropout self.pad_idx", "x shape (seq_length, N) embedding = self.dropout(self.embedding(x)) # embedding shape", "src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len'] output = self.forward(src_seq, src_lengths,trg_seq)", "vector is important return hidden,cell # Cell class NewDecoder(nn.Module): def", "# output is irrelevant, context vector is important return hidden,cell", "self.max_epochs= kwargs.get('max_epochs',5) self.learning_rate = 0.0005 self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx) self.encoder =", "predicted ids for all sequences in a batch to targets", "forward for a single decoder time step, but will #", "forward(self, word_input, last_hidden, encoder_outputs): # Note that we will only", "p=0.1): super(Encoder, self).__init__() self.hidden_size = hidden_size self.num_layers = num_layers self.dropout", "self.hidden_size = hidden_size self.num_layers = num_layers self.dropout = nn.Dropout(p) self.embedding", "import nn from torch import optim import torch import torch.nn.functional", "list of predicted ids for all sequences in a batch", "input token \"\"\" source = src_seq.transpose(0, 1) target_len = self.max_length", "self.pad_idx = padding_index self.num_layers = 2 self.max_length =10 self.save_hyperparameters() self.max_epochs=", "encoder_outputs): # Note that we will only be running forward", "self.enc_emb_dim, self.enc_hid_dim, self.num_layers, self.enc_dropout ) self.decoder = NewDecoder( self.enc_hid_dim, self.dec_emb_dim,", "#x = target[0,:] decoder_input = torch.ones(batch_size).long().to(self.device) decoder_hidden = encoder_hidden encoder_outputs", "not know if this is a problem, loss will be", "embedding_size, hidden_size, num_layers=2, p=0.1): super(Encoder, self).__init__() self.hidden_size = hidden_size self.num_layers", "from torch import nn from torch import optim import torch", "# we are not using encoder_outputs here word_embedded = self.embedding(word_input)", "src_seq.transpose(0, 1) target_len = self.max_length if trg_seq is not None:", "outputs[t] = decoder_output #(N, english_vocab_size) #best_guess = output.argmax(1) topv, topi", "hidden_size, embedding_size, output_size, n_layers=1, dropout_p=0.1): super(NewDecoder, self).__init__() # Define parameters", "know if this is a problem, loss will be computed", "because we are interested only in hidden state #output_padded, output_lengths", "hidden_size self.output_size = output_size self.n_layers =n_layers self.dropout_p = dropout_p #", "will only be running forward for a single decoder time", "1 X B X emb_length # Combine embedded input word", "if trg_seq is not None: target = trg_seq.transpose(0, 1) target_len", "batch['src_len'] output = self.forward(src_seq, src_lengths,trg_seq) # 
do not know if", "pytorch_lightning as pl import pytorch_lightning.metrics.functional as plfunc from pytorch_lightning.loggers import", "# x shape (seq_length, N) embedding = self.dropout(self.embedding(x)) # embedding", "on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True ) self.log( \"val_bleu_idx\", bleu_score, on_step=False,", "end output = output.view(-1, self.output_dim) trg_seq = trg_seq.transpose(0, 1) trg", "= pred_seq.T # change layout: seq_len * batch_size -> batch_size", "# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_seq2seq.ipynb (unless", "= nn.Dropout(p) self.embedding = nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size,", "= nn.LSTM(embedding_size, hidden_size, n_layers, dropout=dropout_p, batch_first=False) self.out = nn.Linear(hidden_size, output_size)", "def _init_weights(self): for name, param in self.named_parameters(): if \"weight\" in", "epochs=self.max_epochs, anneal_strategy='linear', final_div_factor=1000, pct_start = 0.01 ), \"name\": \"learning_rate\", \"interval\":\"step\",", "= batch['src'],batch['trg'], batch['src_len'] output = self.forward(src_seq, src_lengths,trg_seq) # do not", "self.dropout = nn.Dropout(p) self.embedding = nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size,", "output.view(-1, self.output_dim) trg_seq = trg_seq.transpose(0, 1) trg = trg_seq.reshape(-1) loss", "\"\"\" validation is in eval model so we do not", "irrelevant because we are interested only in hidden state #output_padded,", "= 8, hidden_dim=32, dropout=0.1, max_length=20, **kwargs): super().__init__() # dynamic, based", "# Define parameters self.hidden_size = hidden_size self.output_size = output_size self.n_layers", "def __init__(self, hidden_size, embedding_size, output_size, n_layers=1, dropout_p=0.1): super(NewDecoder, self).__init__() #", "def training_step(self, batch, batch_idx): src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len']", "torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence # Cell class Encoder(nn.Module): def __init__(self,", "Define parameters self.hidden_size = hidden_size self.output_size = output_size self.n_layers =n_layers", "= self.loss(output, trg) self.log('train_loss',loss.item(), on_step = True, on_epoch=True, prog_bar =", "seq_len*batch_size*vocab_size -> seq_len * batch_size # change layout: sesq_len *", "if this is a problem, loss will be computed with", "for training seq2seq model with teacher forcing Module try to", "batch_size, target_vocab_size).to(self.device) encoder_hidden = self.encoder(source, source_len) # mask = [batch_size,", "\"\"\" @staticmethod def add_model_specific_args(parent_parser): parser = ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument(\"--emb_dim\", type=int,", "N) embedding = self.dropout(self.embedding(x)) # embedding shape : (seq_length, N,", "a batch to targets acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1)) # need", "torch.zeros(target_len, batch_size, target_vocab_size).to(self.device) encoder_hidden = self.encoder(source, source_len) # mask =", "only in hidden state #output_padded, output_lengths = pad_packed_sequence(output_packed, batch_first=True) #", "default=32) parser.add_argument('--hidden_dim', type=int, default=64) parser.add_argument('--dropout', type=float, default=0.1) return parser def", "= dropout_p # Define layers self.embedding = nn.Embedding(output_size, embedding_size) self.dropout=nn.Dropout(dropout_p)", "to format needed by 
blue_score_func # [seq1=[[reference1],[reference2]], seq2=[reference1]] target_ids =", "= pad_packed_sequence(output_packed, batch_first=True) # output is irrelevant, context vector is", "word_input.unsqueeze(0) # we are not using encoder_outputs here word_embedded =", "sensors\"\"\" src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len'] outputs = self.forward(src_seq,", "dropout self.dec_dropout = dropout self.pad_idx = padding_index self.num_layers = 2", "random.random() < teacher_force_ratio and target is not None else decoder_input", "In starting, original input token will be sent as input", "encoder_hidden = self.encoder(source, source_len) # mask = [batch_size, src len]", "* batch_size # change layout: sesq_len * batch_size -> batch_size", "import random import pytorch_lightning as pl import pytorch_lightning.metrics.functional as plfunc", "in eval model so we do not have to use", "parser.add_argument('--hidden_dim', type=int, default=64) parser.add_argument('--dropout', type=float, default=0.1) return parser def __init__(self,", "on_epoch=True, prog_bar = True, logger=True) return loss def validation_step(self, batch,batch_idx):", "do not have to use placeholder input sensors\"\"\" src_seq, trg_seq,", "compare list of predicted ids for all sequences in a", "emb_length # Combine embedded input word and hidden vector, run", "t in range(target_len): decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs) outputs[t]", "return outputs def loss(self, logits, target): return self._loss(logits, target) def", "torch.unsqueeze(trg_batch, 1).tolist() bleu_score - plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(self.device) self.log( 'val_loss', loss,", "= encoder_hidden encoder_outputs = None for t in range(target_len): decoder_output,", "self.log( 'val_loss', loss, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True) self.log( \"val_acc\",", ") [ seq_tok1, seqtok2] predicted_ids - pred_seq.tolist() # need to", "'NewDecoder', 'Seq2Seq'] # Cell from torch import nn from torch", "input_size, embedding_size, hidden_size, num_layers=2, p=0.1): super(Encoder, self).__init__() self.hidden_size = hidden_size", "need to cast to list of predicted sequences ( as", "=n_layers self.dropout_p = dropout_p # Define layers self.embedding = nn.Embedding(output_size,", "= self.dropout(word_embedded) # 1 X B X emb_length # Combine", "loss = self.loss(output, trg) self.log('train_loss',loss.item(), on_step = True, on_epoch=True, prog_bar", "input token will be sent as input token \"\"\" source", "eos token at the end #x = target[0,:] decoder_input =", "nbs/01_seq2seq.ipynb (unless otherwise specified). 
__all__ = ['Encoder', 'NewDecoder', 'Seq2Seq'] #", "pad_packed_sequence # Cell class Encoder(nn.Module): def __init__(self, input_size, embedding_size, hidden_size,", "be running forward for a single decoder time step, but", "n_layers, dropout=dropout_p, batch_first=False) self.out = nn.Linear(hidden_size, output_size) def forward(self, word_input,", "token \"\"\" source = src_seq.transpose(0, 1) target_len = self.max_length if", "batch_size = source.shape[1] target_vocab_size = self.output_dim outputs = torch.zeros(target_len, batch_size,", "return [optimizer],[lr_scheduler] def training_step(self, batch, batch_idx): src_seq, trg_seq, src_lengths =", "token # without sos token at the beginning and eos", "mean=0, std=0.01) else: nn.init.constant_(param.data, 0) def create_mask(self, src): mask =", "nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p,batch_first=False) def forward(self,", "= hidden_dim self.dec_hid_dim = hidden_dim self.enc_dropout = dropout self.dec_dropout =", "class Encoder(nn.Module): def __init__(self, input_size, embedding_size, hidden_size, num_layers=2, p=0.1): super(Encoder,", "\"weight\" in name: nn.init.normal_(param.data, mean=0, std=0.01) else: nn.init.constant_(param.data, 0) def", "computed with sos token # without sos token at the", "= target.shape[0] batch_size = source.shape[1] target_vocab_size = self.output_dim outputs =", "= optim.AdamW(self.parameters(), lr=self.learning_rate) lr_scheduler = { 'scheduler': optim.lr_scheduler.OneCycleLR( optimizer, max_lr", "output_size self.n_layers =n_layers self.dropout_p = dropout_p # Define layers self.embedding", "sos token at the beginning and eos token at the", "output_size) def forward(self, word_input, last_hidden, encoder_outputs): # Note that we", "self.enc_hid_dim, self.dec_emb_dim, self.output_dim, self.num_layers, self.dec_dropout ) self._init_weights() def _init_weights(self): for", "nn.CrossEntropyLoss(ignore_index=self.pad_idx) self.encoder = Encoder( self.input_dim, self.enc_emb_dim, self.enc_hid_dim, self.num_layers, self.enc_dropout )", "=10 self.save_hyperparameters() self.max_epochs= kwargs.get('max_epochs',5) self.learning_rate = 0.0005 self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx)", "0, emb_dim = 8, hidden_dim=32, dropout=0.1, max_length=20, **kwargs): super().__init__() #", "num_layers=2, p=0.1): super(Encoder, self).__init__() self.hidden_size = hidden_size self.num_layers = num_layers", "= torch.ones(batch_size).long().to(self.device) decoder_hidden = encoder_hidden encoder_outputs = None for t", "here word_embedded = self.embedding(word_input) # 1 X B word_embedded =", "self.dropout=nn.Dropout(dropout_p) self.rnn = nn.LSTM(embedding_size, hidden_size, n_layers, dropout=dropout_p, batch_first=False) self.out =", "decoder_input = torch.ones(batch_size).long().to(self.device) decoder_hidden = encoder_hidden encoder_outputs = None for", "decoder_output #(N, english_vocab_size) #best_guess = output.argmax(1) topv, topi = decoder_output.topk(1)", "prog_bar=True, logger=True, sync_dist=True ) self.log( \"val_bleu_idx\", bleu_score, on_step=False, on_epoch=True, prog_bar=True,", "and target is not None else decoder_input return outputs def", "will be computed with sos token # without sos token", "src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len'] outputs = self.forward(src_seq, src_lengths,", "= plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1)) # need to cast to list of", "= input_vocab_size 
self.output_dim = output_vocab_size self.enc_emb_dim = emb_dim self.dec_emb_dim =", "optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate) lr_scheduler = { 'scheduler': optim.lr_scheduler.OneCycleLR( optimizer,", "dropout_p # Define layers self.embedding = nn.Embedding(output_size, embedding_size) self.dropout=nn.Dropout(dropout_p) self.rnn", "specified). __all__ = ['Encoder', 'NewDecoder', 'Seq2Seq'] # Cell from torch", "# conver to format needed by blue_score_func # [seq1=[[reference1],[reference2]], seq2=[reference1]]", "x, x_len): # x shape (seq_length, N) embedding = self.dropout(self.embedding(x))", "hidden_size self.num_layers = num_layers self.dropout = nn.Dropout(p) self.embedding = nn.Embedding(input_size,", "self.dec_dropout = dropout self.pad_idx = padding_index self.num_layers = 2 self.max_length", "mask = (src != self.pad_idx).permute(1, 0) return mask def forward(self,", "target) def configure_optimizers(self): optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate) lr_scheduler = {", "from one sequence to another \"\"\" @staticmethod def add_model_specific_args(parent_parser): parser", "self.n_layers =n_layers self.dropout_p = dropout_p # Define layers self.embedding =", "self.encoder = Encoder( self.input_dim, self.enc_emb_dim, self.enc_hid_dim, self.num_layers, self.enc_dropout ) self.decoder", "def __init__(self, input_size, embedding_size, hidden_size, num_layers=2, p=0.1): super(Encoder, self).__init__() self.hidden_size", "otherwise specified). __all__ = ['Encoder', 'NewDecoder', 'Seq2Seq'] # Cell from", "self.output_dim outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(self.device) encoder_hidden = self.encoder(source, source_len)", "to help in decoding. In starting, original input token will", "# 1 X B X hidden predictions = self.out(output) #", "pytorch lightning module for training seq2seq model with teacher forcing", "<reponame>pmaxit/dlnotebooks<filename>mllib/nlp/seq2seq.py<gh_stars>0 # AUTOGENERATED! DO NOT EDIT! 
File to edit: nbs/01_seq2seq.ipynb", "learn mapping from one sequence to another \"\"\" @staticmethod def", "trg = trg_seq[1:].reshape(-1) loss = self.loss(logits, trg) pred_seq = outputs[1:].argmax(2)", "add additional dim to each target reference sequence in order", "= target[t] if random.random() < teacher_force_ratio and target is not", "emb_dim self.dec_emb_dim = emb_dim self.enc_hid_dim = hidden_dim self.dec_hid_dim = hidden_dim", "sequences ( as list of token ids ) [ seq_tok1,", "model so we do not have to use placeholder input", "decoder time step, but will # use all encoder outputs", "= trg_seq.transpose(0, 1) target_len = target.shape[0] batch_size = source.shape[1] target_vocab_size", "None for t in range(target_len): decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden,", "is not None else decoder_input return outputs def loss(self, logits,", "max_lr = self.learning_rate, steps_per_epoch = 3379, epochs=self.max_epochs, anneal_strategy='linear', final_div_factor=1000, pct_start", "for t in range(target_len): decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)", "B word_embedded = self.dropout(word_embedded) # 1 X B X emb_length", "self.rnn = nn.LSTM(embedding_size, hidden_size, n_layers, dropout=dropout_p, batch_first=False) self.out = nn.Linear(hidden_size,", "parser.add_argument(\"--emb_dim\", type=int, default=32) parser.add_argument('--hidden_dim', type=int, default=64) parser.add_argument('--dropout', type=float, default=0.1) return", "input word and hidden vector, run through RNN output, hidden", "source.shape[1] target_vocab_size = self.output_dim outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(self.device) encoder_hidden", "embedding_size, output_size, n_layers=1, dropout_p=0.1): super(NewDecoder, self).__init__() # Define parameters self.hidden_size", "embedding_size) x_packed = pack_padded_sequence(embedding, x_len.cpu(), batch_first=False, enforce_sorted=False) output_packed, (hidden,cell) =", "on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True ) return loss, acc, bleu_score", "X B word_embedded = self.dropout(word_embedded) # 1 X B X", "def forward(self, x, x_len): # x shape (seq_length, N) embedding", "mask def forward(self, src_seq, source_len, trg_seq, teacher_force_ratio=0.5): \"\"\" teacher_force_ratio is", "= hidden_dim self.enc_dropout = dropout self.dec_dropout = dropout self.pad_idx =", "needed by blue_score_func # [seq1=[[reference1],[reference2]], seq2=[reference1]] target_ids = torch.unsqueeze(trg_batch, 1).tolist()", "trg_seq[1:].T # compare list of predicted ids for all sequences", "\"frequency\": 1 } return [optimizer],[lr_scheduler] def training_step(self, batch, batch_idx): src_seq,", "\"val_bleu_idx\", bleu_score, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True ) return loss,", "len] # without sos token at the beginning and eos", "as plfunc from pytorch_lightning.loggers import TensorBoardLogger # Cell class Seq2Seq(pl.LightningModule):", "predicted sequences ( as list of token ids ) [", "NewDecoder(nn.Module): def __init__(self, hidden_size, embedding_size, output_size, n_layers=1, dropout_p=0.1): super(NewDecoder, self).__init__()", "#output = F.log_softmax(predictions) return predictions, hidden # Cell import random", "class Seq2Seq(pl.LightningModule): \"\"\" Encoder decoder pytorch lightning module for training", "random import pytorch_lightning as pl import pytorch_lightning.metrics.functional as plfunc from", 
"trg_seq.transpose(0, 1) trg = trg_seq.reshape(-1) loss = self.loss(output, trg) self.log('train_loss',loss.item(),", "# Cell from torch import nn from torch import optim", "optim.lr_scheduler.OneCycleLR( optimizer, max_lr = self.learning_rate, steps_per_epoch = 3379, epochs=self.max_epochs, anneal_strategy='linear',", "torch.ones(batch_size).long().to(self.device) decoder_hidden = encoder_hidden encoder_outputs = None for t in", "embedded input word and hidden vector, run through RNN output,", "word_input, last_hidden, encoder_outputs): # Note that we will only be", "= outputs[1:].argmax(2) # seq_len*batch_size*vocab_size -> seq_len * batch_size # change", "None: target = trg_seq.transpose(0, 1) target_len = target.shape[0] batch_size =", "= [batch_size, src len] # without sos token at the", "def validation_step(self, batch,batch_idx): \"\"\" validation is in eval model so", "1).tolist() bleu_score - plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(self.device) self.log( 'val_loss', loss, on_step=False,", "return predictions, hidden # Cell import random import pytorch_lightning as", "parser def __init__(self, input_vocab_size, output_vocab_size, padding_index = 0, emb_dim =", "step, but will # use all encoder outputs word_input =", "vector, run through RNN output, hidden = self.rnn(word_embedded, last_hidden) #", "predicted_ids - pred_seq.tolist() # need to add additional dim to", "[batch_size, src len] # without sos token at the beginning", "= decoder_output #(N, english_vocab_size) #best_guess = output.argmax(1) topv, topi =", "word and hidden vector, run through RNN output, hidden =", "1) target_len = self.max_length if trg_seq is not None: target", "and hidden vector, run through RNN output, hidden = self.rnn(word_embedded,", "-> batch_size * seq_len pred_seq = pred_seq.T # change layout:", "outputs = self.forward(src_seq, src_lengths, trg_seq, 0) logits = outputs[1:].view(-1, self.output_dim)", "super().__init__() # dynamic, based on tokenizer vocab size defined in", "= nn.CrossEntropyLoss(ignore_index=self.pad_idx) self.encoder = Encoder( self.input_dim, self.enc_emb_dim, self.enc_hid_dim, self.num_layers, self.enc_dropout", "self.num_layers, self.enc_dropout ) self.decoder = NewDecoder( self.enc_hid_dim, self.dec_emb_dim, self.output_dim, self.num_layers,", "self.max_length if trg_seq is not None: target = trg_seq.transpose(0, 1)", "from pytorch_lightning.loggers import TensorBoardLogger # Cell class Seq2Seq(pl.LightningModule): \"\"\" Encoder", "logger=True) return loss def validation_step(self, batch,batch_idx): \"\"\" validation is in", "batch, batch_idx): src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len'] output =", "!= self.pad_idx).permute(1, 0) return mask def forward(self, src_seq, source_len, trg_seq,", "batch['src'],batch['trg'], batch['src_len'] outputs = self.forward(src_seq, src_lengths, trg_seq, 0) logits =", "RNN output, hidden = self.rnn(word_embedded, last_hidden) # 1 X B", "self._init_weights() def _init_weights(self): for name, param in self.named_parameters(): if \"weight\"", "batch_size * seq_len pred_seq = pred_seq.T # change layout: seq_len", "= torch.zeros(target_len, batch_size, target_vocab_size).to(self.device) encoder_hidden = self.encoder(source, source_len) # mask", "DO NOT EDIT! File to edit: nbs/01_seq2seq.ipynb (unless otherwise specified).", "word_input = word_input.unsqueeze(0) # we are not using encoder_outputs here", "in decoding. 
In starting, original input token will be sent", "input sensors\"\"\" src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len'] outputs =", "final_div_factor=1000, pct_start = 0.01 ), \"name\": \"learning_rate\", \"interval\":\"step\", \"frequency\": 1", "# dynamic, based on tokenizer vocab size defined in datamodule", "self.dec_dropout ) self._init_weights() def _init_weights(self): for name, param in self.named_parameters():", "output_vocab_size, padding_index = 0, emb_dim = 8, hidden_dim=32, dropout=0.1, max_length=20,", "decoder_hidden, encoder_outputs) outputs[t] = decoder_output #(N, english_vocab_size) #best_guess = output.argmax(1)", "super(NewDecoder, self).__init__() # Define parameters self.hidden_size = hidden_size self.output_size =", "eval model so we do not have to use placeholder", "plfunc from pytorch_lightning.loggers import TensorBoardLogger # Cell class Seq2Seq(pl.LightningModule): \"\"\"", "Cell class Encoder(nn.Module): def __init__(self, input_size, embedding_size, hidden_size, num_layers=2, p=0.1):", "super(Encoder, self).__init__() self.hidden_size = hidden_size self.num_layers = num_layers self.dropout =", "= nn.Embedding(output_size, embedding_size) self.dropout=nn.Dropout(dropout_p) self.rnn = nn.LSTM(embedding_size, hidden_size, n_layers, dropout=dropout_p,", "self.embedding = nn.Embedding(output_size, embedding_size) self.dropout=nn.Dropout(dropout_p) self.rnn = nn.LSTM(embedding_size, hidden_size, n_layers,", "True, logger=True) return loss def validation_step(self, batch,batch_idx): \"\"\" validation is", "return loss def validation_step(self, batch,batch_idx): \"\"\" validation is in eval", "pred_seq = pred_seq.T # change layout: seq_len * batch_size ->", "# change layout: sesq_len * batch_size -> batch_size * seq_len", "to edit: nbs/01_seq2seq.ipynb (unless otherwise specified). 
__all__ = ['Encoder', 'NewDecoder',", "= batch['src'],batch['trg'], batch['src_len'] outputs = self.forward(src_seq, src_lengths, trg_seq, 0) logits", "outputs word_input = word_input.unsqueeze(0) # we are not using encoder_outputs", "predictions = self.out(output) # 1, B, out #output = F.log_softmax(predictions)", "conver to format needed by blue_score_func # [seq1=[[reference1],[reference2]], seq2=[reference1]] target_ids", "self.output_dim) trg = trg_seq[1:].reshape(-1) loss = self.loss(logits, trg) pred_seq =", "trg_seq.transpose(0, 1) target_len = target.shape[0] batch_size = source.shape[1] target_vocab_size =", "= self.learning_rate, steps_per_epoch = 3379, epochs=self.max_epochs, anneal_strategy='linear', final_div_factor=1000, pct_start =", "self.dropout(self.embedding(x)) # embedding shape : (seq_length, N, embedding_size) x_packed =", "= trg_seq[1:].reshape(-1) loss = self.loss(logits, trg) pred_seq = outputs[1:].argmax(2) #", "self.enc_hid_dim, self.num_layers, self.enc_dropout ) self.decoder = NewDecoder( self.enc_hid_dim, self.dec_emb_dim, self.output_dim,", "sos token # without sos token at the beginning and", "acc, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True ) self.log( \"val_bleu_idx\", bleu_score,", "= self.forward(src_seq, src_lengths, trg_seq, 0) logits = outputs[1:].view(-1, self.output_dim) trg", "def create_mask(self, src): mask = (src != self.pad_idx).permute(1, 0) return", "without sos token at the beginning and eos token at", "seqtok2] predicted_ids - pred_seq.tolist() # need to add additional dim", "in order to # conver to format needed by blue_score_func", "= output_size self.n_layers =n_layers self.dropout_p = dropout_p # Define layers", "time step, but will # use all encoder outputs word_input", "# Cell class NewDecoder(nn.Module): def __init__(self, hidden_size, embedding_size, output_size, n_layers=1,", "to another \"\"\" @staticmethod def add_model_specific_args(parent_parser): parser = ArgumentParser(parents=[parent_parser], add_help=False)", "that we will only be running forward for a single", "src len] # without sos token at the beginning and", "(src != self.pad_idx).permute(1, 0) return mask def forward(self, src_seq, source_len,", "to use placeholder input sensors\"\"\" src_seq, trg_seq, src_lengths = batch['src'],batch['trg'],", "embedding shape : (seq_length, N, embedding_size) x_packed = pack_padded_sequence(embedding, x_len.cpu(),", "placeholder input sensors\"\"\" src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len'] outputs", "pytorch_lightning.loggers import TensorBoardLogger # Cell class Seq2Seq(pl.LightningModule): \"\"\" Encoder decoder", "in datamodule self.input_dim = input_vocab_size self.output_dim = output_vocab_size self.enc_emb_dim =", "padding_index self.num_layers = 2 self.max_length =10 self.save_hyperparameters() self.max_epochs= kwargs.get('max_epochs',5) self.learning_rate", "beginning and eos token at the end #x = target[0,:]", "B, out #output = F.log_softmax(predictions) return predictions, hidden # Cell", "if \"weight\" in name: nn.init.normal_(param.data, mean=0, std=0.01) else: nn.init.constant_(param.data, 0)", "* batch_size -> batch_size * seq_len pred_seq = pred_seq.T #", "= (src != self.pad_idx).permute(1, 0) return mask def forward(self, src_seq,", "__all__ = ['Encoder', 'NewDecoder', 'Seq2Seq'] # Cell from torch import", "sync_dist=True ) self.log( \"val_bleu_idx\", bleu_score, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True", "for name, param 
in self.named_parameters(): if \"weight\" in name: nn.init.normal_(param.data,", "another \"\"\" @staticmethod def add_model_specific_args(parent_parser): parser = ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument(\"--emb_dim\",", "self).__init__() # Define parameters self.hidden_size = hidden_size self.output_size = output_size", "use all encoder outputs word_input = word_input.unsqueeze(0) # we are", "= self.max_length if trg_seq is not None: target = trg_seq.transpose(0,", "batch['src'],batch['trg'], batch['src_len'] output = self.forward(src_seq, src_lengths,trg_seq) # do not know", "will # use all encoder outputs word_input = word_input.unsqueeze(0) #", "(seq_length, N) embedding = self.dropout(self.embedding(x)) # embedding shape : (seq_length,", "self.output_size = output_size self.n_layers =n_layers self.dropout_p = dropout_p # Define", "seq_len pred_seq = pred_seq.T # change layout: seq_len * batch_size", "batch_first=True) # output is irrelevant, context vector is important return", "trg_batch = trg_seq[1:].T # compare list of predicted ids for", "trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len'] output = self.forward(src_seq, src_lengths,trg_seq) #", "Combine embedded input word and hidden vector, run through RNN", "anneal_strategy='linear', final_div_factor=1000, pct_start = 0.01 ), \"name\": \"learning_rate\", \"interval\":\"step\", \"frequency\":", "# do not know if this is a problem, loss", "# change layout: seq_len * batch_size -> batch_size * seq_len", "token ids ) [ seq_tok1, seqtok2] predicted_ids - pred_seq.tolist() #", "loss = self.loss(logits, trg) pred_seq = outputs[1:].argmax(2) # seq_len*batch_size*vocab_size ->", "from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence # Cell class Encoder(nn.Module): def", "self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p,batch_first=False) def forward(self, x, x_len):", "# Cell import random import pytorch_lightning as pl import pytorch_lightning.metrics.functional", "tokenizer vocab size defined in datamodule self.input_dim = input_vocab_size self.output_dim", "[seq1=[[reference1],[reference2]], seq2=[reference1]] target_ids = torch.unsqueeze(trg_batch, 1).tolist() bleu_score - plfunc.nlp.bleu_score(predicted_ids, target_ids,", "= hidden_size self.output_size = output_size self.n_layers =n_layers self.dropout_p = dropout_p", "training_step(self, batch, batch_idx): src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len'] output", "nn.Embedding(output_size, embedding_size) self.dropout=nn.Dropout(dropout_p) self.rnn = nn.LSTM(embedding_size, hidden_size, n_layers, dropout=dropout_p, batch_first=False)", "\"name\": \"learning_rate\", \"interval\":\"step\", \"frequency\": 1 } return [optimizer],[lr_scheduler] def training_step(self,", "0.01 ), \"name\": \"learning_rate\", \"interval\":\"step\", \"frequency\": 1 } return [optimizer],[lr_scheduler]", "teacher forcing Module try to learn mapping from one sequence", "= self.out(output) # 1, B, out #output = F.log_softmax(predictions) return", "forward(self, src_seq, source_len, trg_seq, teacher_force_ratio=0.5): \"\"\" teacher_force_ratio is used to", "bleu_score, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True ) return loss, acc,", "batch_first=False, enforce_sorted=False) output_packed, (hidden,cell) = self.rnn(x_packed) # irrelevant because we", "\"val_acc\", acc, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True ) self.log( 
\"val_bleu_idx\",", "# Define layers self.embedding = nn.Embedding(output_size, embedding_size) self.dropout=nn.Dropout(dropout_p) self.rnn =", "will be sent as input token \"\"\" source = src_seq.transpose(0,", "embedding_size) self.dropout=nn.Dropout(dropout_p) self.rnn = nn.LSTM(embedding_size, hidden_size, n_layers, dropout=dropout_p, batch_first=False) self.out", "decoder_hidden = encoder_hidden encoder_outputs = None for t in range(target_len):", "layers self.embedding = nn.Embedding(output_size, embedding_size) self.dropout=nn.Dropout(dropout_p) self.rnn = nn.LSTM(embedding_size, hidden_size,", "topi.squeeze().detach() decoder_input = target[t] if random.random() < teacher_force_ratio and target", "\"learning_rate\", \"interval\":\"step\", \"frequency\": 1 } return [optimizer],[lr_scheduler] def training_step(self, batch,", "True, on_epoch=True, prog_bar = True, logger=True) return loss def validation_step(self,", "return parser def __init__(self, input_vocab_size, output_vocab_size, padding_index = 0, emb_dim", "important return hidden,cell # Cell class NewDecoder(nn.Module): def __init__(self, hidden_size,", "of predicted sequences ( as list of token ids )", "def configure_optimizers(self): optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate) lr_scheduler = { 'scheduler':", "2 self.max_length =10 self.save_hyperparameters() self.max_epochs= kwargs.get('max_epochs',5) self.learning_rate = 0.0005 self._loss", "sesq_len * batch_size -> batch_size * seq_len pred_seq = pred_seq.T", "nn.Linear(hidden_size, output_size) def forward(self, word_input, last_hidden, encoder_outputs): # Note that", "\"interval\":\"step\", \"frequency\": 1 } return [optimizer],[lr_scheduler] def training_step(self, batch, batch_idx):", "self.log('train_loss',loss.item(), on_step = True, on_epoch=True, prog_bar = True, logger=True) return", "we are not using encoder_outputs here word_embedded = self.embedding(word_input) #", "self.num_layers = 2 self.max_length =10 self.save_hyperparameters() self.max_epochs= kwargs.get('max_epochs',5) self.learning_rate =", "to learn mapping from one sequence to another \"\"\" @staticmethod", "nn.init.constant_(param.data, 0) def create_mask(self, src): mask = (src != self.pad_idx).permute(1,", "num_layers, dropout=p,batch_first=False) def forward(self, x, x_len): # x shape (seq_length,", "= self.rnn(x_packed) # irrelevant because we are interested only in", "layout: sesq_len * batch_size -> batch_size * seq_len pred_seq =", "based on tokenizer vocab size defined in datamodule self.input_dim =", "dim to each target reference sequence in order to #", "padding_index = 0, emb_dim = 8, hidden_dim=32, dropout=0.1, max_length=20, **kwargs):", "= 0.0005 self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx) self.encoder = Encoder( self.input_dim, self.enc_emb_dim,", "= output.view(-1, self.output_dim) trg_seq = trg_seq.transpose(0, 1) trg = trg_seq.reshape(-1)", "a problem, loss will be computed with sos token #", "= hidden_size self.num_layers = num_layers self.dropout = nn.Dropout(p) self.embedding =", "as input token \"\"\" source = src_seq.transpose(0, 1) target_len =", "lightning module for training seq2seq model with teacher forcing Module", "input_vocab_size, output_vocab_size, padding_index = 0, emb_dim = 8, hidden_dim=32, dropout=0.1,", ": (seq_length, N, embedding_size) x_packed = pack_padded_sequence(embedding, x_len.cpu(), batch_first=False, enforce_sorted=False)", "help in decoding. 
In starting, original input token will be", "= self.encoder(source, source_len) # mask = [batch_size, src len] #", "8, hidden_dim=32, dropout=0.1, max_length=20, **kwargs): super().__init__() # dynamic, based on", "self.out(output) # 1, B, out #output = F.log_softmax(predictions) return predictions,", "cast to list of predicted sequences ( as list of", "list of token ids ) [ seq_tok1, seqtok2] predicted_ids -", "torch.nn.functional as F from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence # Cell", "class NewDecoder(nn.Module): def __init__(self, hidden_size, embedding_size, output_size, n_layers=1, dropout_p=0.1): super(NewDecoder,", "logger=True, sync_dist=True) self.log( \"val_acc\", acc, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True", "= 0, emb_dim = 8, hidden_dim=32, dropout=0.1, max_length=20, **kwargs): super().__init__()", "self.forward(src_seq, src_lengths,trg_seq) # do not know if this is a", "# compare list of predicted ids for all sequences in", "seq2seq model with teacher forcing Module try to learn mapping", "logits, target): return self._loss(logits, target) def configure_optimizers(self): optimizer = optim.AdamW(self.parameters(),", "target[0,:] decoder_input = torch.ones(batch_size).long().to(self.device) decoder_hidden = encoder_hidden encoder_outputs = None", "teacher_force_ratio and target is not None else decoder_input return outputs", "logger=True, sync_dist=True ) self.log( \"val_bleu_idx\", bleu_score, on_step=False, on_epoch=True, prog_bar=True, logger=True,", "* seq_len pred_seq = pred_seq.T # change layout: seq_len *", "add_model_specific_args(parent_parser): parser = ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument(\"--emb_dim\", type=int, default=32) parser.add_argument('--hidden_dim', type=int,", "X emb_length # Combine embedded input word and hidden vector,", "self.output_dim, self.num_layers, self.dec_dropout ) self._init_weights() def _init_weights(self): for name, param", "* seq_len trg_batch = trg_seq[1:].T # compare list of predicted", "src_lengths = batch['src'],batch['trg'], batch['src_len'] output = self.forward(src_seq, src_lengths,trg_seq) # do", "= source.shape[1] target_vocab_size = self.output_dim outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(self.device)", "self.rnn(word_embedded, last_hidden) # 1 X B X hidden predictions =", "name: nn.init.normal_(param.data, mean=0, std=0.01) else: nn.init.constant_(param.data, 0) def create_mask(self, src):", "we will only be running forward for a single decoder", "X B X hidden predictions = self.out(output) # 1, B,", "and eos token at the end #x = target[0,:] decoder_input", "self.log( \"val_acc\", acc, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True ) self.log(", "src): mask = (src != self.pad_idx).permute(1, 0) return mask def", "return hidden,cell # Cell class NewDecoder(nn.Module): def __init__(self, hidden_size, embedding_size,", "parameters self.hidden_size = hidden_size self.output_size = output_size self.n_layers =n_layers self.dropout_p", "param in self.named_parameters(): if \"weight\" in name: nn.init.normal_(param.data, mean=0, std=0.01)", "is in eval model so we do not have to", "type=int, default=64) parser.add_argument('--dropout', type=float, default=0.1) return parser def __init__(self, input_vocab_size,", "on_epoch=True, prog_bar=True, logger=True, sync_dist=True ) self.log( \"val_bleu_idx\", bleu_score, on_step=False, on_epoch=True,", "teacher_force_ratio=0.5): \"\"\" 
teacher_force_ratio is used to help in decoding. In", "1, B, out #output = F.log_softmax(predictions) return predictions, hidden #", "self.out = nn.Linear(hidden_size, output_size) def forward(self, word_input, last_hidden, encoder_outputs): #", "emb_dim self.enc_hid_dim = hidden_dim self.dec_hid_dim = hidden_dim self.enc_dropout = dropout", "self.encoder(source, source_len) # mask = [batch_size, src len] # without", "pct_start = 0.01 ), \"name\": \"learning_rate\", \"interval\":\"step\", \"frequency\": 1 }", "logits = outputs[1:].view(-1, self.output_dim) trg = trg_seq[1:].reshape(-1) loss = self.loss(logits,", "acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1)) # need to cast to list", "decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs) outputs[t] = decoder_output #(N, english_vocab_size)", "encoder_hidden encoder_outputs = None for t in range(target_len): decoder_output, decoder_hidden", "= self.decoder(decoder_input, decoder_hidden, encoder_outputs) outputs[t] = decoder_output #(N, english_vocab_size) #best_guess", "self.rnn(x_packed) # irrelevant because we are interested only in hidden", "source_len, trg_seq, teacher_force_ratio=0.5): \"\"\" teacher_force_ratio is used to help in", "list of predicted sequences ( as list of token ids", "last_hidden, encoder_outputs): # Note that we will only be running", "the beginning and eos token at the end #x =", "N, embedding_size) x_packed = pack_padded_sequence(embedding, x_len.cpu(), batch_first=False, enforce_sorted=False) output_packed, (hidden,cell)", "# Combine embedded input word and hidden vector, run through", "eos token at the end output = output.view(-1, self.output_dim) trg_seq", "**kwargs): super().__init__() # dynamic, based on tokenizer vocab size defined", "= torch.unsqueeze(trg_batch, 1).tolist() bleu_score - plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(self.device) self.log( 'val_loss',", "= self.embedding(word_input) # 1 X B word_embedded = self.dropout(word_embedded) #", "mask = [batch_size, src len] # without sos token at", "interested only in hidden state #output_padded, output_lengths = pad_packed_sequence(output_packed, batch_first=True)", "x_len): # x shape (seq_length, N) embedding = self.dropout(self.embedding(x)) #", "seq_tok1, seqtok2] predicted_ids - pred_seq.tolist() # need to add additional", "nn.LSTM(embedding_size, hidden_size, n_layers, dropout=dropout_p, batch_first=False) self.out = nn.Linear(hidden_size, output_size) def", "= nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p,batch_first=False) def", "Encoder decoder pytorch lightning module for training seq2seq model with", ") self.decoder = NewDecoder( self.enc_hid_dim, self.dec_emb_dim, self.output_dim, self.num_layers, self.dec_dropout )", "token at the end output = output.view(-1, self.output_dim) trg_seq =", "import pack_padded_sequence, pad_packed_sequence # Cell class Encoder(nn.Module): def __init__(self, input_size,", "bleu_score - plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(self.device) self.log( 'val_loss', loss, on_step=False, on_epoch=True,", "source = src_seq.transpose(0, 1) target_len = self.max_length if trg_seq is", "- plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(self.device) self.log( 'val_loss', loss, on_step=False, on_epoch=True, prog_bar=True,", "pl import pytorch_lightning.metrics.functional as plfunc from pytorch_lightning.loggers import TensorBoardLogger #", "import optim import 
torch import torch.nn.functional as F from torch.nn.utils.rnn", "self.named_parameters(): if \"weight\" in name: nn.init.normal_(param.data, mean=0, std=0.01) else: nn.init.constant_(param.data,", "1) target_len = target.shape[0] batch_size = source.shape[1] target_vocab_size = self.output_dim", "ids for all sequences in a batch to targets acc", "= True, on_epoch=True, prog_bar = True, logger=True) return loss def", "original input token will be sent as input token \"\"\"", "Cell class NewDecoder(nn.Module): def __init__(self, hidden_size, embedding_size, output_size, n_layers=1, dropout_p=0.1):", "0.0005 self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx) self.encoder = Encoder( self.input_dim, self.enc_emb_dim, self.enc_hid_dim,", "model with teacher forcing Module try to learn mapping from", "-> seq_len * batch_size # change layout: sesq_len * batch_size", "a single decoder time step, but will # use all", "num_layers self.dropout = nn.Dropout(p) self.embedding = nn.Embedding(input_size, embedding_size) self.rnn =", "dropout=dropout_p, batch_first=False) self.out = nn.Linear(hidden_size, output_size) def forward(self, word_input, last_hidden,", "self.save_hyperparameters() self.max_epochs= kwargs.get('max_epochs',5) self.learning_rate = 0.0005 self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx) self.encoder", "self.enc_dropout ) self.decoder = NewDecoder( self.enc_hid_dim, self.dec_emb_dim, self.output_dim, self.num_layers, self.dec_dropout", "src_lengths,trg_seq) # do not know if this is a problem,", "src_lengths = batch['src'],batch['trg'], batch['src_len'] outputs = self.forward(src_seq, src_lengths, trg_seq, 0)", "self.decoder(decoder_input, decoder_hidden, encoder_outputs) outputs[t] = decoder_output #(N, english_vocab_size) #best_guess =", "validation_step(self, batch,batch_idx): \"\"\" validation is in eval model so we", "target.shape[0] batch_size = source.shape[1] target_vocab_size = self.output_dim outputs = torch.zeros(target_len,", "= emb_dim self.enc_hid_dim = hidden_dim self.dec_hid_dim = hidden_dim self.enc_dropout =", "else: nn.init.constant_(param.data, 0) def create_mask(self, src): mask = (src !=", "def add_model_specific_args(parent_parser): parser = ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument(\"--emb_dim\", type=int, default=32) parser.add_argument('--hidden_dim',", "on_step = True, on_epoch=True, prog_bar = True, logger=True) return loss", "= self.dropout(self.embedding(x)) # embedding shape : (seq_length, N, embedding_size) x_packed", "loss(self, logits, target): return self._loss(logits, target) def configure_optimizers(self): optimizer =", "on tokenizer vocab size defined in datamodule self.input_dim = input_vocab_size", "type=float, default=0.1) return parser def __init__(self, input_vocab_size, output_vocab_size, padding_index =", "be sent as input token \"\"\" source = src_seq.transpose(0, 1)", "= trg_seq.reshape(-1) loss = self.loss(output, trg) self.log('train_loss',loss.item(), on_step = True,", "at the beginning and eos token at the end #x", "[optimizer],[lr_scheduler] def training_step(self, batch, batch_idx): src_seq, trg_seq, src_lengths = batch['src'],batch['trg'],", "# 1 X B word_embedded = self.dropout(word_embedded) # 1 X", "\"\"\" source = src_seq.transpose(0, 1) target_len = self.max_length if trg_seq", "self._loss(logits, target) def configure_optimizers(self): optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate) lr_scheduler =", "), \"name\": \"learning_rate\", \"interval\":\"step\", 
\"frequency\": 1 } return [optimizer],[lr_scheduler] def", "NOT EDIT! File to edit: nbs/01_seq2seq.ipynb (unless otherwise specified). __all__", "default=64) parser.add_argument('--dropout', type=float, default=0.1) return parser def __init__(self, input_vocab_size, output_vocab_size,", "this is a problem, loss will be computed with sos", "state #output_padded, output_lengths = pad_packed_sequence(output_packed, batch_first=True) # output is irrelevant,", "target): return self._loss(logits, target) def configure_optimizers(self): optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate)", "target is not None else decoder_input return outputs def loss(self,", "= 3379, epochs=self.max_epochs, anneal_strategy='linear', final_div_factor=1000, pct_start = 0.01 ), \"name\":", "parser = ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument(\"--emb_dim\", type=int, default=32) parser.add_argument('--hidden_dim', type=int, default=64)", "to list of predicted sequences ( as list of token", "to each target reference sequence in order to # conver", "shape : (seq_length, N, embedding_size) x_packed = pack_padded_sequence(embedding, x_len.cpu(), batch_first=False,", "\"\"\" teacher_force_ratio is used to help in decoding. In starting,", "# embedding shape : (seq_length, N, embedding_size) x_packed = pack_padded_sequence(embedding,", "edit: nbs/01_seq2seq.ipynb (unless otherwise specified). __all__ = ['Encoder', 'NewDecoder', 'Seq2Seq']", "'scheduler': optim.lr_scheduler.OneCycleLR( optimizer, max_lr = self.learning_rate, steps_per_epoch = 3379, epochs=self.max_epochs,", "self.enc_dropout = dropout self.dec_dropout = dropout self.pad_idx = padding_index self.num_layers", "sequence to another \"\"\" @staticmethod def add_model_specific_args(parent_parser): parser = ArgumentParser(parents=[parent_parser],", "src_seq, source_len, trg_seq, teacher_force_ratio=0.5): \"\"\" teacher_force_ratio is used to help", "is irrelevant, context vector is important return hidden,cell # Cell", "encoder_outputs here word_embedded = self.embedding(word_input) # 1 X B word_embedded", "all encoder outputs word_input = word_input.unsqueeze(0) # we are not", "= output_vocab_size self.enc_emb_dim = emb_dim self.dec_emb_dim = emb_dim self.enc_hid_dim =", "= pack_padded_sequence(embedding, x_len.cpu(), batch_first=False, enforce_sorted=False) output_packed, (hidden,cell) = self.rnn(x_packed) #", "do not know if this is a problem, loss will", "hidden_size, num_layers=2, p=0.1): super(Encoder, self).__init__() self.hidden_size = hidden_size self.num_layers =", "AUTOGENERATED! DO NOT EDIT! 
File to edit: nbs/01_seq2seq.ipynb (unless otherwise", "dropout=p,batch_first=False) def forward(self, x, x_len): # x shape (seq_length, N)", "sequence in order to # conver to format needed by", "hidden # Cell import random import pytorch_lightning as pl import", "def forward(self, word_input, last_hidden, encoder_outputs): # Note that we will", "last_hidden) # 1 X B X hidden predictions = self.out(output)", "trg_seq = trg_seq.transpose(0, 1) trg = trg_seq.reshape(-1) loss = self.loss(output,", "# irrelevant because we are interested only in hidden state", "n_layers=1, dropout_p=0.1): super(NewDecoder, self).__init__() # Define parameters self.hidden_size = hidden_size", "decoder_input return outputs def loss(self, logits, target): return self._loss(logits, target)", "out #output = F.log_softmax(predictions) return predictions, hidden # Cell import", "the beginning and eos token at the end output =", "dropout self.pad_idx = padding_index self.num_layers = 2 self.max_length =10 self.save_hyperparameters()", "Encoder(nn.Module): def __init__(self, input_size, embedding_size, hidden_size, num_layers=2, p=0.1): super(Encoder, self).__init__()", "one sequence to another \"\"\" @staticmethod def add_model_specific_args(parent_parser): parser =", "are not using encoder_outputs here word_embedded = self.embedding(word_input) # 1", "self.loss(logits, trg) pred_seq = outputs[1:].argmax(2) # seq_len*batch_size*vocab_size -> seq_len *", "= self.forward(src_seq, src_lengths,trg_seq) # do not know if this is", "add_help=False) parser.add_argument(\"--emb_dim\", type=int, default=32) parser.add_argument('--hidden_dim', type=int, default=64) parser.add_argument('--dropout', type=float, default=0.1)", "used to help in decoding. In starting, original input token", "the end #x = target[0,:] decoder_input = torch.ones(batch_size).long().to(self.device) decoder_hidden =", "at the beginning and eos token at the end output", "on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True) self.log( \"val_acc\", acc, on_step=False, on_epoch=True,", "word_embedded = self.embedding(word_input) # 1 X B word_embedded = self.dropout(word_embedded)", "self.embedding(word_input) # 1 X B word_embedded = self.dropout(word_embedded) # 1", "loss, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True) self.log( \"val_acc\", acc, on_step=False,", "token at the beginning and eos token at the end", "layout: seq_len * batch_size -> batch_size * seq_len trg_batch =", "decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs) outputs[t] = decoder_output #(N,", "with sos token # without sos token at the beginning", "= padding_index self.num_layers = 2 self.max_length =10 self.save_hyperparameters() self.max_epochs= kwargs.get('max_epochs',5)", "be computed with sos token # without sos token at", "trg_seq.reshape(-1) loss = self.loss(output, trg) self.log('train_loss',loss.item(), on_step = True, on_epoch=True,", "( as list of token ids ) [ seq_tok1, seqtok2]", "TensorBoardLogger # Cell class Seq2Seq(pl.LightningModule): \"\"\" Encoder decoder pytorch lightning", "beginning and eos token at the end output = output.view(-1,", "ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument(\"--emb_dim\", type=int, default=32) parser.add_argument('--hidden_dim', type=int, default=64) parser.add_argument('--dropout', type=float,", "nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p,batch_first=False) def forward(self, x, x_len): # x", "self.output_dim) 
trg_seq = trg_seq.transpose(0, 1) trg = trg_seq.reshape(-1) loss =", "# seq_len*batch_size*vocab_size -> seq_len * batch_size # change layout: sesq_len", "torch import torch.nn.functional as F from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence", "Cell class Seq2Seq(pl.LightningModule): \"\"\" Encoder decoder pytorch lightning module for", "token will be sent as input token \"\"\" source =", "seq_len * batch_size -> batch_size * seq_len trg_batch = trg_seq[1:].T", "self.decoder = NewDecoder( self.enc_hid_dim, self.dec_emb_dim, self.output_dim, self.num_layers, self.dec_dropout ) self._init_weights()", "sequences in a batch to targets acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1))", "else decoder_input return outputs def loss(self, logits, target): return self._loss(logits,", "1 } return [optimizer],[lr_scheduler] def training_step(self, batch, batch_idx): src_seq, trg_seq,", "3379, epochs=self.max_epochs, anneal_strategy='linear', final_div_factor=1000, pct_start = 0.01 ), \"name\": \"learning_rate\",", "seq2=[reference1]] target_ids = torch.unsqueeze(trg_batch, 1).tolist() bleu_score - plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(self.device)", "self.dec_emb_dim = emb_dim self.enc_hid_dim = hidden_dim self.dec_hid_dim = hidden_dim self.enc_dropout", "hidden_dim=32, dropout=0.1, max_length=20, **kwargs): super().__init__() # dynamic, based on tokenizer", "['Encoder', 'NewDecoder', 'Seq2Seq'] # Cell from torch import nn from", "vocab size defined in datamodule self.input_dim = input_vocab_size self.output_dim =", "outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(self.device) encoder_hidden = self.encoder(source, source_len) #", "encoder_outputs) outputs[t] = decoder_output #(N, english_vocab_size) #best_guess = output.argmax(1) topv,", "target_vocab_size = self.output_dim outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(self.device) encoder_hidden =", "we do not have to use placeholder input sensors\"\"\" src_seq,", "Cell from torch import nn from torch import optim import", "Define layers self.embedding = nn.Embedding(output_size, embedding_size) self.dropout=nn.Dropout(dropout_p) self.rnn = nn.LSTM(embedding_size,", "self.forward(src_seq, src_lengths, trg_seq, 0) logits = outputs[1:].view(-1, self.output_dim) trg =", "to # conver to format needed by blue_score_func # [seq1=[[reference1],[reference2]],", "decoder pytorch lightning module for training seq2seq model with teacher", "plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(self.device) self.log( 'val_loss', loss, on_step=False, on_epoch=True, prog_bar=True, logger=True,", "embedding = self.dropout(self.embedding(x)) # embedding shape : (seq_length, N, embedding_size)", "torch import nn from torch import optim import torch import", "dropout=0.1, max_length=20, **kwargs): super().__init__() # dynamic, based on tokenizer vocab", "= ['Encoder', 'NewDecoder', 'Seq2Seq'] # Cell from torch import nn", "decoder_output.topk(1) decoder_input = topi.squeeze().detach() decoder_input = target[t] if random.random() <", "batch_idx): src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len'] output = self.forward(src_seq,", "hidden_dim self.dec_hid_dim = hidden_dim self.enc_dropout = dropout self.dec_dropout = dropout", "input_vocab_size self.output_dim = output_vocab_size self.enc_emb_dim = emb_dim self.dec_emb_dim = emb_dim", "is a problem, loss will be computed with sos token", "pred_seq.T # change layout: seq_len * 
batch_size -> batch_size *", "batch_size * seq_len trg_batch = trg_seq[1:].T # compare list of", "not None else decoder_input return outputs def loss(self, logits, target):", "decoder_input = topi.squeeze().detach() decoder_input = target[t] if random.random() < teacher_force_ratio", "} return [optimizer],[lr_scheduler] def training_step(self, batch, batch_idx): src_seq, trg_seq, src_lengths", "target reference sequence in order to # conver to format", "by blue_score_func # [seq1=[[reference1],[reference2]], seq2=[reference1]] target_ids = torch.unsqueeze(trg_batch, 1).tolist() bleu_score", "create_mask(self, src): mask = (src != self.pad_idx).permute(1, 0) return mask", "english_vocab_size) #best_guess = output.argmax(1) topv, topi = decoder_output.topk(1) decoder_input =", "hidden_dim self.enc_dropout = dropout self.dec_dropout = dropout self.pad_idx = padding_index", "output = output.view(-1, self.output_dim) trg_seq = trg_seq.transpose(0, 1) trg =", "all sequences in a batch to targets acc = plfunc.accuracy(pred_seq.reshape(-1),", "target_ids, n_gram=3).to(self.device) self.log( 'val_loss', loss, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)", "= Encoder( self.input_dim, self.enc_emb_dim, self.enc_hid_dim, self.num_layers, self.enc_dropout ) self.decoder =", "teacher_force_ratio is used to help in decoding. In starting, original", "the end output = output.view(-1, self.output_dim) trg_seq = trg_seq.transpose(0, 1)", "nn from torch import optim import torch import torch.nn.functional as", "loss def validation_step(self, batch,batch_idx): \"\"\" validation is in eval model", "batch to targets acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1)) # need to", "in a batch to targets acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1)) #", "= word_input.unsqueeze(0) # we are not using encoder_outputs here word_embedded", "(seq_length, N, embedding_size) x_packed = pack_padded_sequence(embedding, x_len.cpu(), batch_first=False, enforce_sorted=False) output_packed,", "ids ) [ seq_tok1, seqtok2] predicted_ids - pred_seq.tolist() # need", "pred_seq.tolist() # need to add additional dim to each target", "F.log_softmax(predictions) return predictions, hidden # Cell import random import pytorch_lightning", "self.pad_idx).permute(1, 0) return mask def forward(self, src_seq, source_len, trg_seq, teacher_force_ratio=0.5):", "B X hidden predictions = self.out(output) # 1, B, out", "import pytorch_lightning.metrics.functional as plfunc from pytorch_lightning.loggers import TensorBoardLogger # Cell", "in hidden state #output_padded, output_lengths = pad_packed_sequence(output_packed, batch_first=True) # output", "end #x = target[0,:] decoder_input = torch.ones(batch_size).long().to(self.device) decoder_hidden = encoder_hidden", "context vector is important return hidden,cell # Cell class NewDecoder(nn.Module):", "default=0.1) return parser def __init__(self, input_vocab_size, output_vocab_size, padding_index = 0,", "kwargs.get('max_epochs',5) self.learning_rate = 0.0005 self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx) self.encoder = Encoder(", "loss will be computed with sos token # without sos", "# Cell class Seq2Seq(pl.LightningModule): \"\"\" Encoder decoder pytorch lightning module", "using encoder_outputs here word_embedded = self.embedding(word_input) # 1 X B", "= nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p,batch_first=False) def forward(self, x, x_len): #", "__init__(self, hidden_size, embedding_size, output_size, 
n_layers=1, dropout_p=0.1): super(NewDecoder, self).__init__() # Define", "trg_batch.reshape(-1)) # need to cast to list of predicted sequences", "= { 'scheduler': optim.lr_scheduler.OneCycleLR( optimizer, max_lr = self.learning_rate, steps_per_epoch =", "# [seq1=[[reference1],[reference2]], seq2=[reference1]] target_ids = torch.unsqueeze(trg_batch, 1).tolist() bleu_score - plfunc.nlp.bleu_score(predicted_ids,", "single decoder time step, but will # use all encoder", "self.enc_emb_dim = emb_dim self.dec_emb_dim = emb_dim self.enc_hid_dim = hidden_dim self.dec_hid_dim", "target_vocab_size).to(self.device) encoder_hidden = self.encoder(source, source_len) # mask = [batch_size, src", "# Note that we will only be running forward for", "in name: nn.init.normal_(param.data, mean=0, std=0.01) else: nn.init.constant_(param.data, 0) def create_mask(self,", "# mask = [batch_size, src len] # without sos token", "training seq2seq model with teacher forcing Module try to learn", "__init__(self, input_size, embedding_size, hidden_size, num_layers=2, p=0.1): super(Encoder, self).__init__() self.hidden_size =", "self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx) self.encoder = Encoder( self.input_dim, self.enc_emb_dim, self.enc_hid_dim, self.num_layers,", "- pred_seq.tolist() # need to add additional dim to each", "sync_dist=True) self.log( \"val_acc\", acc, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True )", "* batch_size -> batch_size * seq_len trg_batch = trg_seq[1:].T #", "'val_loss', loss, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True) self.log( \"val_acc\", acc,", "self.max_length =10 self.save_hyperparameters() self.max_epochs= kwargs.get('max_epochs',5) self.learning_rate = 0.0005 self._loss =", "self).__init__() self.hidden_size = hidden_size self.num_layers = num_layers self.dropout = nn.Dropout(p)", "shape (seq_length, N) embedding = self.dropout(self.embedding(x)) # embedding shape :", "self.input_dim, self.enc_emb_dim, self.enc_hid_dim, self.num_layers, self.enc_dropout ) self.decoder = NewDecoder( self.enc_hid_dim,", "batch['src_len'] outputs = self.forward(src_seq, src_lengths, trg_seq, 0) logits = outputs[1:].view(-1,", "output_vocab_size self.enc_emb_dim = emb_dim self.dec_emb_dim = emb_dim self.enc_hid_dim = hidden_dim", "hidden,cell # Cell class NewDecoder(nn.Module): def __init__(self, hidden_size, embedding_size, output_size,", "1 X B word_embedded = self.dropout(word_embedded) # 1 X B", "hidden state #output_padded, output_lengths = pad_packed_sequence(output_packed, batch_first=True) # output is", "outputs[1:].view(-1, self.output_dim) trg = trg_seq[1:].reshape(-1) loss = self.loss(logits, trg) pred_seq", "only be running forward for a single decoder time step,", "X B X emb_length # Combine embedded input word and", "= outputs[1:].view(-1, self.output_dim) trg = trg_seq[1:].reshape(-1) loss = self.loss(logits, trg)", "def loss(self, logits, target): return self._loss(logits, target) def configure_optimizers(self): optimizer", "as list of token ids ) [ seq_tok1, seqtok2] predicted_ids", "NewDecoder( self.enc_hid_dim, self.dec_emb_dim, self.output_dim, self.num_layers, self.dec_dropout ) self._init_weights() def _init_weights(self):", "= True, logger=True) return loss def validation_step(self, batch,batch_idx): \"\"\" validation", "output is irrelevant, context vector is important return hidden,cell #", "format needed by blue_score_func # [seq1=[[reference1],[reference2]], seq2=[reference1]] target_ids = 
torch.unsqueeze(trg_batch,", "problem, loss will be computed with sos token # without", "at the end #x = target[0,:] decoder_input = torch.ones(batch_size).long().to(self.device) decoder_hidden", "self.input_dim = input_vocab_size self.output_dim = output_vocab_size self.enc_emb_dim = emb_dim self.dec_emb_dim", "datamodule self.input_dim = input_vocab_size self.output_dim = output_vocab_size self.enc_emb_dim = emb_dim", "1 X B X hidden predictions = self.out(output) # 1,", "= dropout self.pad_idx = padding_index self.num_layers = 2 self.max_length =10", "self.num_layers, self.dec_dropout ) self._init_weights() def _init_weights(self): for name, param in", "#best_guess = output.argmax(1) topv, topi = decoder_output.topk(1) decoder_input = topi.squeeze().detach()", "targets acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1)) # need to cast to", "0) def create_mask(self, src): mask = (src != self.pad_idx).permute(1, 0)", "we are interested only in hidden state #output_padded, output_lengths =", "import torch.nn.functional as F from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence #", "configure_optimizers(self): optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate) lr_scheduler = { 'scheduler': optim.lr_scheduler.OneCycleLR(", "return self._loss(logits, target) def configure_optimizers(self): optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate) lr_scheduler", "hidden_size, n_layers, dropout=dropout_p, batch_first=False) self.out = nn.Linear(hidden_size, output_size) def forward(self,", "B X emb_length # Combine embedded input word and hidden", "dropout_p=0.1): super(NewDecoder, self).__init__() # Define parameters self.hidden_size = hidden_size self.output_size", "batch,batch_idx): \"\"\" validation is in eval model so we do", "= NewDecoder( self.enc_hid_dim, self.dec_emb_dim, self.output_dim, self.num_layers, self.dec_dropout ) self._init_weights() def", "target_len = target.shape[0] batch_size = source.shape[1] target_vocab_size = self.output_dim outputs", "not None: target = trg_seq.transpose(0, 1) target_len = target.shape[0] batch_size", "pack_padded_sequence(embedding, x_len.cpu(), batch_first=False, enforce_sorted=False) output_packed, (hidden,cell) = self.rnn(x_packed) # irrelevant", "is used to help in decoding. 
In starting, original input", "return mask def forward(self, src_seq, source_len, trg_seq, teacher_force_ratio=0.5): \"\"\" teacher_force_ratio", "= trg_seq.transpose(0, 1) trg = trg_seq.reshape(-1) loss = self.loss(output, trg)", "seq_len trg_batch = trg_seq[1:].T # compare list of predicted ids", "each target reference sequence in order to # conver to", "nn.init.normal_(param.data, mean=0, std=0.01) else: nn.init.constant_(param.data, 0) def create_mask(self, src): mask", "range(target_len): decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs) outputs[t] = decoder_output", "__init__(self, input_vocab_size, output_vocab_size, padding_index = 0, emb_dim = 8, hidden_dim=32,", "try to learn mapping from one sequence to another \"\"\"", "#(N, english_vocab_size) #best_guess = output.argmax(1) topv, topi = decoder_output.topk(1) decoder_input", "x_len.cpu(), batch_first=False, enforce_sorted=False) output_packed, (hidden,cell) = self.rnn(x_packed) # irrelevant because", "pad_packed_sequence(output_packed, batch_first=True) # output is irrelevant, context vector is important", "self.learning_rate, steps_per_epoch = 3379, epochs=self.max_epochs, anneal_strategy='linear', final_div_factor=1000, pct_start = 0.01", "def forward(self, src_seq, source_len, trg_seq, teacher_force_ratio=0.5): \"\"\" teacher_force_ratio is used", "trg_seq, 0) logits = outputs[1:].view(-1, self.output_dim) trg = trg_seq[1:].reshape(-1) loss", "dynamic, based on tokenizer vocab size defined in datamodule self.input_dim", "target = trg_seq.transpose(0, 1) target_len = target.shape[0] batch_size = source.shape[1]", "forward(self, x, x_len): # x shape (seq_length, N) embedding =", "emb_dim = 8, hidden_dim=32, dropout=0.1, max_length=20, **kwargs): super().__init__() # dynamic,", "= None for t in range(target_len): decoder_output, decoder_hidden = self.decoder(decoder_input,", "= target[0,:] decoder_input = torch.ones(batch_size).long().to(self.device) decoder_hidden = encoder_hidden encoder_outputs =", "= 0.01 ), \"name\": \"learning_rate\", \"interval\":\"step\", \"frequency\": 1 } return", "additional dim to each target reference sequence in order to", "optim import torch import torch.nn.functional as F from torch.nn.utils.rnn import", "= src_seq.transpose(0, 1) target_len = self.max_length if trg_seq is not", "as pl import pytorch_lightning.metrics.functional as plfunc from pytorch_lightning.loggers import TensorBoardLogger", "import TensorBoardLogger # Cell class Seq2Seq(pl.LightningModule): \"\"\" Encoder decoder pytorch", "is not None: target = trg_seq.transpose(0, 1) target_len = target.shape[0]", "need to add additional dim to each target reference sequence", "use placeholder input sensors\"\"\" src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len']", "running forward for a single decoder time step, but will", "= F.log_softmax(predictions) return predictions, hidden # Cell import random import", "module for training seq2seq model with teacher forcing Module try", "self.log( \"val_bleu_idx\", bleu_score, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True ) return", "src_lengths, trg_seq, 0) logits = outputs[1:].view(-1, self.output_dim) trg = trg_seq[1:].reshape(-1)", "= 2 self.max_length =10 self.save_hyperparameters() self.max_epochs= kwargs.get('max_epochs',5) self.learning_rate = 0.0005", "in range(target_len): decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs) outputs[t] =", 
"self.num_layers = num_layers self.dropout = nn.Dropout(p) self.embedding = nn.Embedding(input_size, embedding_size)", "target[t] if random.random() < teacher_force_ratio and target is not None", "pack_padded_sequence, pad_packed_sequence # Cell class Encoder(nn.Module): def __init__(self, input_size, embedding_size,", "hidden vector, run through RNN output, hidden = self.rnn(word_embedded, last_hidden)", "= trg_seq[1:].T # compare list of predicted ids for all", "so we do not have to use placeholder input sensors\"\"\"", "is important return hidden,cell # Cell class NewDecoder(nn.Module): def __init__(self,", "change layout: sesq_len * batch_size -> batch_size * seq_len pred_seq", "encoder_outputs = None for t in range(target_len): decoder_output, decoder_hidden =", "optimizer, max_lr = self.learning_rate, steps_per_epoch = 3379, epochs=self.max_epochs, anneal_strategy='linear', final_div_factor=1000,", "with teacher forcing Module try to learn mapping from one", "outputs[1:].argmax(2) # seq_len*batch_size*vocab_size -> seq_len * batch_size # change layout:", "EDIT! File to edit: nbs/01_seq2seq.ipynb (unless otherwise specified). __all__ =", "run through RNN output, hidden = self.rnn(word_embedded, last_hidden) # 1", "batch_size # change layout: sesq_len * batch_size -> batch_size *", "import torch import torch.nn.functional as F from torch.nn.utils.rnn import pack_padded_sequence,", "seq_len * batch_size # change layout: sesq_len * batch_size ->", "forcing Module try to learn mapping from one sequence to", "output_packed, (hidden,cell) = self.rnn(x_packed) # irrelevant because we are interested", "reference sequence in order to # conver to format needed", "1) trg = trg_seq.reshape(-1) loss = self.loss(output, trg) self.log('train_loss',loss.item(), on_step", "trg) self.log('train_loss',loss.item(), on_step = True, on_epoch=True, prog_bar = True, logger=True)", "target_ids = torch.unsqueeze(trg_batch, 1).tolist() bleu_score - plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(self.device) self.log(", "word_embedded = self.dropout(word_embedded) # 1 X B X emb_length #", "< teacher_force_ratio and target is not None else decoder_input return", "self.hidden_size = hidden_size self.output_size = output_size self.n_layers =n_layers self.dropout_p =", "defined in datamodule self.input_dim = input_vocab_size self.output_dim = output_vocab_size self.enc_emb_dim", "[ seq_tok1, seqtok2] predicted_ids - pred_seq.tolist() # need to add", "decoder_input = target[t] if random.random() < teacher_force_ratio and target is", "0) logits = outputs[1:].view(-1, self.output_dim) trg = trg_seq[1:].reshape(-1) loss =", "of predicted ids for all sequences in a batch to", "batch_size -> batch_size * seq_len trg_batch = trg_seq[1:].T # compare", "name, param in self.named_parameters(): if \"weight\" in name: nn.init.normal_(param.data, mean=0,", "type=int, default=32) parser.add_argument('--hidden_dim', type=int, default=64) parser.add_argument('--dropout', type=float, default=0.1) return parser", "starting, original input token will be sent as input token", "= num_layers self.dropout = nn.Dropout(p) self.embedding = nn.Embedding(input_size, embedding_size) self.rnn", "order to # conver to format needed by blue_score_func #", "trg = trg_seq.reshape(-1) loss = self.loss(output, trg) self.log('train_loss',loss.item(), on_step =", "= self.output_dim outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(self.device) encoder_hidden = self.encoder(source,", "to targets acc = 
plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1)) # need to cast", "n_gram=3).to(self.device) self.log( 'val_loss', loss, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True) self.log(", "= nn.Linear(hidden_size, output_size) def forward(self, word_input, last_hidden, encoder_outputs): # Note", "sent as input token \"\"\" source = src_seq.transpose(0, 1) target_len", "outputs def loss(self, logits, target): return self._loss(logits, target) def configure_optimizers(self):", "not have to use placeholder input sensors\"\"\" src_seq, trg_seq, src_lengths", "self.dropout_p = dropout_p # Define layers self.embedding = nn.Embedding(output_size, embedding_size)", "self.output_dim = output_vocab_size self.enc_emb_dim = emb_dim self.dec_emb_dim = emb_dim self.enc_hid_dim", "embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p,batch_first=False) def forward(self, x,", "prog_bar=True, logger=True, sync_dist=True) self.log( \"val_acc\", acc, on_step=False, on_epoch=True, prog_bar=True, logger=True,", "@staticmethod def add_model_specific_args(parent_parser): parser = ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument(\"--emb_dim\", type=int, default=32)", "self.enc_hid_dim = hidden_dim self.dec_hid_dim = hidden_dim self.enc_dropout = dropout self.dec_dropout", "\"\"\" Encoder decoder pytorch lightning module for training seq2seq model", "but will # use all encoder outputs word_input = word_input.unsqueeze(0)", "output, hidden = self.rnn(word_embedded, last_hidden) # 1 X B X", "if random.random() < teacher_force_ratio and target is not None else", "blue_score_func # [seq1=[[reference1],[reference2]], seq2=[reference1]] target_ids = torch.unsqueeze(trg_batch, 1).tolist() bleu_score -", "to cast to list of predicted sequences ( as list", "std=0.01) else: nn.init.constant_(param.data, 0) def create_mask(self, src): mask = (src", "# without sos token at the beginning and eos token", "irrelevant, context vector is important return hidden,cell # Cell class", "and eos token at the end output = output.view(-1, self.output_dim)", "output_size, n_layers=1, dropout_p=0.1): super(NewDecoder, self).__init__() # Define parameters self.hidden_size =", "torch import optim import torch import torch.nn.functional as F from", "def __init__(self, input_vocab_size, output_vocab_size, padding_index = 0, emb_dim = 8,", "steps_per_epoch = 3379, epochs=self.max_epochs, anneal_strategy='linear', final_div_factor=1000, pct_start = 0.01 ),", "= self.loss(logits, trg) pred_seq = outputs[1:].argmax(2) # seq_len*batch_size*vocab_size -> seq_len", "through RNN output, hidden = self.rnn(word_embedded, last_hidden) # 1 X", "# need to add additional dim to each target reference", "= decoder_output.topk(1) decoder_input = topi.squeeze().detach() decoder_input = target[t] if random.random()", "= ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument(\"--emb_dim\", type=int, default=32) parser.add_argument('--hidden_dim', type=int, default=64) parser.add_argument('--dropout',", "not using encoder_outputs here word_embedded = self.embedding(word_input) # 1 X", "Encoder( self.input_dim, self.enc_emb_dim, self.enc_hid_dim, self.num_layers, self.enc_dropout ) self.decoder = NewDecoder(", "#output_padded, output_lengths = pad_packed_sequence(output_packed, batch_first=True) # output is irrelevant, context", "trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len'] outputs = self.forward(src_seq, src_lengths, trg_seq,", ") 
self._init_weights() def _init_weights(self): for name, param in self.named_parameters(): if", "(hidden,cell) = self.rnn(x_packed) # irrelevant because we are interested only", "= output.argmax(1) topv, topi = decoder_output.topk(1) decoder_input = topi.squeeze().detach() decoder_input", "# 1 X B X emb_length # Combine embedded input", "for a single decoder time step, but will # use", "batch_size -> batch_size * seq_len pred_seq = pred_seq.T # change", "Module try to learn mapping from one sequence to another", "size defined in datamodule self.input_dim = input_vocab_size self.output_dim = output_vocab_size", "self.learning_rate = 0.0005 self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx) self.encoder = Encoder( self.input_dim,", "in self.named_parameters(): if \"weight\" in name: nn.init.normal_(param.data, mean=0, std=0.01) else:", "max_length=20, **kwargs): super().__init__() # dynamic, based on tokenizer vocab size", "validation is in eval model so we do not have", "output.argmax(1) topv, topi = decoder_output.topk(1) decoder_input = topi.squeeze().detach() decoder_input =", "have to use placeholder input sensors\"\"\" src_seq, trg_seq, src_lengths =", "# Cell class Encoder(nn.Module): def __init__(self, input_size, embedding_size, hidden_size, num_layers=2,", "self.dropout(word_embedded) # 1 X B X emb_length # Combine embedded", "decoding. In starting, original input token will be sent as", "token at the end #x = target[0,:] decoder_input = torch.ones(batch_size).long().to(self.device)", "topv, topi = decoder_output.topk(1) decoder_input = topi.squeeze().detach() decoder_input = target[t]", "= topi.squeeze().detach() decoder_input = target[t] if random.random() < teacher_force_ratio and", "parser.add_argument('--dropout', type=float, default=0.1) return parser def __init__(self, input_vocab_size, output_vocab_size, padding_index", "on_epoch=True, prog_bar=True, logger=True, sync_dist=True) self.log( \"val_acc\", acc, on_step=False, on_epoch=True, prog_bar=True,", "self.loss(output, trg) self.log('train_loss',loss.item(), on_step = True, on_epoch=True, prog_bar = True,", "= self.rnn(word_embedded, last_hidden) # 1 X B X hidden predictions", "change layout: seq_len * batch_size -> batch_size * seq_len trg_batch", "pytorch_lightning.metrics.functional as plfunc from pytorch_lightning.loggers import TensorBoardLogger # Cell class", "x_packed = pack_padded_sequence(embedding, x_len.cpu(), batch_first=False, enforce_sorted=False) output_packed, (hidden,cell) = self.rnn(x_packed)", "None else decoder_input return outputs def loss(self, logits, target): return", "(unless otherwise specified). 
__all__ = ['Encoder', 'NewDecoder', 'Seq2Seq'] # Cell", "for all sequences in a batch to targets acc =", "0) return mask def forward(self, src_seq, source_len, trg_seq, teacher_force_ratio=0.5): \"\"\"", "trg_seq is not None: target = trg_seq.transpose(0, 1) target_len =", "F from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence # Cell class Encoder(nn.Module):", "mapping from one sequence to another \"\"\" @staticmethod def add_model_specific_args(parent_parser):", "= dropout self.dec_dropout = dropout self.pad_idx = padding_index self.num_layers =", "Note that we will only be running forward for a", "plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1)) # need to cast to list of predicted", "X hidden predictions = self.out(output) # 1, B, out #output", "source_len) # mask = [batch_size, src len] # without sos", "lr=self.learning_rate) lr_scheduler = { 'scheduler': optim.lr_scheduler.OneCycleLR( optimizer, max_lr = self.learning_rate,", "hidden_size, num_layers, dropout=p,batch_first=False) def forward(self, x, x_len): # x shape", "'Seq2Seq'] # Cell from torch import nn from torch import", "as F from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence # Cell class", "hidden predictions = self.out(output) # 1, B, out #output =", "# 1, B, out #output = F.log_softmax(predictions) return predictions, hidden", "prog_bar = True, logger=True) return loss def validation_step(self, batch,batch_idx): \"\"\"", "pred_seq = outputs[1:].argmax(2) # seq_len*batch_size*vocab_size -> seq_len * batch_size #", "import pytorch_lightning as pl import pytorch_lightning.metrics.functional as plfunc from pytorch_lightning.loggers", "trg_seq[1:].reshape(-1) loss = self.loss(logits, trg) pred_seq = outputs[1:].argmax(2) # seq_len*batch_size*vocab_size", "batch_first=False) self.out = nn.Linear(hidden_size, output_size) def forward(self, word_input, last_hidden, encoder_outputs):", "Cell import random import pytorch_lightning as pl import pytorch_lightning.metrics.functional as", "self.embedding = nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p,batch_first=False)", "trg_seq, teacher_force_ratio=0.5): \"\"\" teacher_force_ratio is used to help in decoding.", "at the end output = output.view(-1, self.output_dim) trg_seq = trg_seq.transpose(0,", "# need to cast to list of predicted sequences (", ") self.log( \"val_bleu_idx\", bleu_score, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=True )", "= emb_dim self.dec_emb_dim = emb_dim self.enc_hid_dim = hidden_dim self.dec_hid_dim =", "nn.Dropout(p) self.embedding = nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers,", "# use all encoder outputs word_input = word_input.unsqueeze(0) # we", "hidden = self.rnn(word_embedded, last_hidden) # 1 X B X hidden", "topi = decoder_output.topk(1) decoder_input = topi.squeeze().detach() decoder_input = target[t] if", "-> batch_size * seq_len trg_batch = trg_seq[1:].T # compare list", "enforce_sorted=False) output_packed, (hidden,cell) = self.rnn(x_packed) # irrelevant because we are", "from torch import optim import torch import torch.nn.functional as F", "_init_weights(self): for name, param in self.named_parameters(): if \"weight\" in name:", "target_len = self.max_length if trg_seq is not None: target =", "self.dec_emb_dim, self.output_dim, self.num_layers, self.dec_dropout ) self._init_weights() def _init_weights(self): for name,", "of token ids ) [ seq_tok1, 
seqtok2] predicted_ids - pred_seq.tolist()", "to add additional dim to each target reference sequence in", "Seq2Seq(pl.LightningModule): \"\"\" Encoder decoder pytorch lightning module for training seq2seq", "predictions, hidden # Cell import random import pytorch_lightning as pl", "trg) pred_seq = outputs[1:].argmax(2) # seq_len*batch_size*vocab_size -> seq_len * batch_size", "are interested only in hidden state #output_padded, output_lengths = pad_packed_sequence(output_packed," ]
[ "= api_url consent_module_config[\"config\"][\"redirect_url\"] = redirect_url satosa_config_dict[\"MICRO_SERVICES\"].append(consent_module_config) # application test_client =", "werkzeug.test import Client from werkzeug.wrappers import Response from satosa.proxy_server import", "re import responses from werkzeug.test import Client from werkzeug.wrappers import", "test_client = Client(make_app(SATOSAConfig(satosa_config_dict)), Response) # incoming auth req http_resp =", "http_resp.status_code == 302 assert http_resp.headers[\"Location\"].startswith(redirect_url) with responses.RequestsMock() as rsps: #", "api_url = \"https://consent.example.com/api\" redirect_url = \"https://consent.example.com/redirect\" consent_module_config[\"config\"][\"api_url\"] = api_url consent_module_config[\"config\"][\"redirect_url\"]", "= Client(make_app(SATOSAConfig(satosa_config_dict)), Response) # incoming auth req http_resp = test_client.get(\"/{}/{}/request\".format(satosa_config_dict[\"BACKEND_MODULES\"][0][\"name\"],", "= test_client.get(\"/{}/response\".format(satosa_config_dict[\"BACKEND_MODULES\"][0][\"name\"])) assert http_resp.status_code == 302 assert http_resp.headers[\"Location\"].startswith(redirect_url) with responses.RequestsMock()", "http_resp = test_client.get(\"/{}/response\".format(satosa_config_dict[\"BACKEND_MODULES\"][0][\"name\"])) assert http_resp.status_code == 302 assert http_resp.headers[\"Location\"].startswith(redirect_url) with", "with responses.RequestsMock() as rsps: # fake consent rsps.add(responses.GET, verify_url_re, json.dumps({\"foo\":", "Response) # incoming auth req http_resp = test_client.get(\"/{}/{}/request\".format(satosa_config_dict[\"BACKEND_MODULES\"][0][\"name\"], satosa_config_dict[\"FRONTEND_MODULES\"][0][\"name\"])) assert", "Client from werkzeug.wrappers import Response from satosa.proxy_server import make_app from", "redirect_url = \"https://consent.example.com/redirect\" consent_module_config[\"config\"][\"api_url\"] = api_url consent_module_config[\"config\"][\"redirect_url\"] = redirect_url satosa_config_dict[\"MICRO_SERVICES\"].append(consent_module_config)", "incoming auth req http_resp = test_client.get(\"/{}/{}/request\".format(satosa_config_dict[\"BACKEND_MODULES\"][0][\"name\"], satosa_config_dict[\"FRONTEND_MODULES\"][0][\"name\"])) assert http_resp.status_code ==", "import json import re import responses from werkzeug.test import Client", "incoming consent response http_resp = test_client.get(\"/consent/handle_consent\") assert http_resp.status_code == 200", "satosa.proxy_server import make_app from satosa.satosa_config import SATOSAConfig class TestConsent: def", "responses.RequestsMock() as rsps: # fake no previous consent consent_request_url_re =", "consent_request_url_re = re.compile(r\"{}/creq/\\w+\".format(api_url)) rsps.add(responses.GET, verify_url_re, status=401) rsps.add(responses.GET, consent_request_url_re, \"test_ticket\", status=200)", "import make_app from satosa.satosa_config import SATOSAConfig class TestConsent: def test_full_flow(self,", "= test_client.get(\"/{}/{}/request\".format(satosa_config_dict[\"BACKEND_MODULES\"][0][\"name\"], satosa_config_dict[\"FRONTEND_MODULES\"][0][\"name\"])) assert http_resp.status_code == 200 verify_url_re = re.compile(r\"{}/verify/\\w+\".format(api_url))", "assert http_resp.headers[\"Location\"].startswith(redirect_url) with responses.RequestsMock() as rsps: # fake consent rsps.add(responses.GET,", "rsps.add(responses.GET, verify_url_re, status=401) rsps.add(responses.GET, 
consent_request_url_re, \"test_ticket\", status=200) # incoming auth", "SATOSAConfig class TestConsent: def test_full_flow(self, satosa_config_dict, consent_module_config): api_url = \"https://consent.example.com/api\"", "302 assert http_resp.headers[\"Location\"].startswith(redirect_url) with responses.RequestsMock() as rsps: # fake consent", "\"test_ticket\", status=200) # incoming auth resp http_resp = test_client.get(\"/{}/response\".format(satosa_config_dict[\"BACKEND_MODULES\"][0][\"name\"])) assert", "= re.compile(r\"{}/verify/\\w+\".format(api_url)) with responses.RequestsMock() as rsps: # fake no previous", "auth resp http_resp = test_client.get(\"/{}/response\".format(satosa_config_dict[\"BACKEND_MODULES\"][0][\"name\"])) assert http_resp.status_code == 302 assert", "test_full_flow(self, satosa_config_dict, consent_module_config): api_url = \"https://consent.example.com/api\" redirect_url = \"https://consent.example.com/redirect\" consent_module_config[\"config\"][\"api_url\"]", "no previous consent consent_request_url_re = re.compile(r\"{}/creq/\\w+\".format(api_url)) rsps.add(responses.GET, verify_url_re, status=401) rsps.add(responses.GET,", "api_url consent_module_config[\"config\"][\"redirect_url\"] = redirect_url satosa_config_dict[\"MICRO_SERVICES\"].append(consent_module_config) # application test_client = Client(make_app(SATOSAConfig(satosa_config_dict)),", "satosa_config_dict, consent_module_config): api_url = \"https://consent.example.com/api\" redirect_url = \"https://consent.example.com/redirect\" consent_module_config[\"config\"][\"api_url\"] =", "rsps.add(responses.GET, consent_request_url_re, \"test_ticket\", status=200) # incoming auth resp http_resp =", "\"bar\"}), status=200) # incoming consent response http_resp = test_client.get(\"/consent/handle_consent\") assert", "verify_url_re, status=401) rsps.add(responses.GET, consent_request_url_re, \"test_ticket\", status=200) # incoming auth resp", "import responses from werkzeug.test import Client from werkzeug.wrappers import Response", "http_resp.status_code == 200 verify_url_re = re.compile(r\"{}/verify/\\w+\".format(api_url)) with responses.RequestsMock() as rsps:", "consent_request_url_re, \"test_ticket\", status=200) # incoming auth resp http_resp = test_client.get(\"/{}/response\".format(satosa_config_dict[\"BACKEND_MODULES\"][0][\"name\"]))", "\"https://consent.example.com/redirect\" consent_module_config[\"config\"][\"api_url\"] = api_url consent_module_config[\"config\"][\"redirect_url\"] = redirect_url satosa_config_dict[\"MICRO_SERVICES\"].append(consent_module_config) # application", "= re.compile(r\"{}/creq/\\w+\".format(api_url)) rsps.add(responses.GET, verify_url_re, status=401) rsps.add(responses.GET, consent_request_url_re, \"test_ticket\", status=200) #", "import Client from werkzeug.wrappers import Response from satosa.proxy_server import make_app", "import Response from satosa.proxy_server import make_app from satosa.satosa_config import SATOSAConfig", "status=200) # incoming consent response http_resp = test_client.get(\"/consent/handle_consent\") assert http_resp.status_code", "rsps: # fake consent rsps.add(responses.GET, verify_url_re, json.dumps({\"foo\": \"bar\"}), status=200) #", "fake no previous consent consent_request_url_re = re.compile(r\"{}/creq/\\w+\".format(api_url)) rsps.add(responses.GET, verify_url_re, status=401)", "class TestConsent: def test_full_flow(self, satosa_config_dict, consent_module_config): api_url = \"https://consent.example.com/api\" redirect_url", "resp 
import json
import re

import responses
from werkzeug.test import Client
from werkzeug.wrappers import Response

from satosa.proxy_server import make_app
from satosa.satosa_config import SATOSAConfig


class TestConsent:
    def test_full_flow(self, satosa_config_dict, consent_module_config):
        api_url = "https://consent.example.com/api"
        redirect_url = "https://consent.example.com/redirect"
        consent_module_config["config"]["api_url"] = api_url
        consent_module_config["config"]["redirect_url"] = redirect_url
        satosa_config_dict["MICRO_SERVICES"].append(consent_module_config)

        # application
        test_client = Client(make_app(SATOSAConfig(satosa_config_dict)), Response)

        # incoming auth req
        http_resp = test_client.get(
            "/{}/{}/request".format(
                satosa_config_dict["BACKEND_MODULES"][0]["name"],
                satosa_config_dict["FRONTEND_MODULES"][0]["name"],
            )
        )
        assert http_resp.status_code == 200

        verify_url_re = re.compile(r"{}/verify/\w+".format(api_url))
        with responses.RequestsMock() as rsps:
            # fake no previous consent
            consent_request_url_re = re.compile(r"{}/creq/\w+".format(api_url))
            rsps.add(responses.GET, verify_url_re, status=401)
            rsps.add(responses.GET, consent_request_url_re, "test_ticket", status=200)

            # incoming auth resp
            http_resp = test_client.get(
                "/{}/response".format(satosa_config_dict["BACKEND_MODULES"][0]["name"])
            )
            assert http_resp.status_code == 302
            assert http_resp.headers["Location"].startswith(redirect_url)

        with responses.RequestsMock() as rsps:
            # fake consent
            rsps.add(responses.GET, verify_url_re, json.dumps({"foo": "bar"}), status=200)

            # incoming consent response
            http_resp = test_client.get("/consent/handle_consent")
            assert http_resp.status_code ==
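# A minimal standalone sketch (not part of the SATOSA suite above) of the `responses`
# mocking pattern the test relies on: the mocked URL is a compiled regex, so the fake
# consent service matches whatever random ticket id ends up in the request path, and
# every intercepted request is recorded on `rsps.calls`. The URL and payload below are
# made up for illustration only.
import json
import re

import requests
import responses


def demo_regex_mock():
    verify_re = re.compile(r"https://consent\.example\.com/api/verify/\w+")
    with responses.RequestsMock() as rsps:
        rsps.add(responses.GET, verify_re, json.dumps({"foo": "bar"}), status=200)

        resp = requests.get("https://consent.example.com/api/verify/abc123")

        assert resp.status_code == 200
        assert resp.json() == {"foo": "bar"}
        assert len(rsps.calls) == 1  # RequestsMock records each intercepted call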
[ "2.0 (the \"License\"); # you may not use this file", "= self.drop(y) mems = None if mems is None else", "= qc.Dropout(cfg.drop, **kw) def init_mems(self, b): cfg = self.cfg if", "None r_bias = None else: q_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head)) r_bias", "mems is None: mems = self.init_mems(b) mlen = mems[0].size(0) if", "self.drop_attn(F.softmax(a, dim=1)) if head_m is not None: a = a", "[] for i in range(len(self.cutoffs)): if self.div_val == 1: l_idx,", "rel_shift(self, x, zero_triu=False): s = (x.size(0), 1) + x.size()[2:] y", "if mems is None else mems[i] ys = lay(y, pos,", "qc.Hypers( {\"d_head\", \"d_model\", \"drop\", \"n_heads\"}, {\"drop_attn\": 0.0, \"eps\": 1e-5, \"pre_norm\":", "logit + bias return y def forward(self, x, labels=None, keep_order=False):", "= self.cfg yo = self.get_y_opts(**kw) if x is None: x_emb", "d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) else: self.out_projs.append(None) self.out_layers.append(qc.Linear(d_embed, s_vocab)) else:", "= qc.Stack() self.out_projs = nn.ParameterList() if div_val == 1: for", "is not None: logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1,", "None and torch.sum(mask).item(): mask = mask == 1 i =", ": self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]] else: weight_i, bias_i, proj_i", "(h**0.5) self.qkv = qc.Linear(m, 3 * n * h, bias=False)", "..prep.config.transfo_xl import PreTrained log = logging.get_logger(__name__) class Model(PreTrained): def __init__(self,", "qc.Dropout(cfg.drop, **kw) self.drop_attn = qc.Dropout(cfg.drop_attn, **kw) self.proj = qc.Linear(n *", "= self.drop(y) attns = () if yo.attn else None hiddens", "in hiddens) y = y.transpose(0, 1).contiguous() ys = (y, attns,", "\"keep_order\") and self.keep_order) or keep_order: y.index_copy_(0, indices_i, -logprob_i) else: y[offset", "+ self.cutoffs for i in range(len(cutoff_values) - 1): l_idx, r_idx", "None b, tgt = x_emb.size(0), x_emb.size(1) else: b, tgt =", "yo.attn: attns = tuple(x.permute(2, 3, 0, 1).contiguous() for x in", "n, b = s if mems is None: mems =", "xs.view(b, tgt - 1) if labels is not None else", "else x_emb.shape[:2])[0] if cfg.PAD is None: n = -1 else:", "= keep_order def _compute_logit(self, x, weight, bias, proj): if proj", "None head_m = self.get_head_m2(head_m, cfg.n_lays) for i, lay in enumerate(self.lays):", "s = (x.size(1) + 1, x.size(0)) + x.size()[2:] y =", "0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) self.out_layers =", "= qc.Linear(m, n * h, bias=False) if r_bias is None", "License for the specific language governing permissions and # limitations", "self.model.tok_emb.lays[i]) if cfg.tie_projs: for i, tie_proj in enumerate(cfg.tie_projs): if tie_proj", "h) k = k.view(klen, b, n, h) v = v.view(klen,", "hiddens = tuple(x.transpose(0, 1).contiguous() for x in hiddens) y =", "..core import utils as qu from ..core import forward as", "= self._compute_logit( x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] ) return F.log_softmax(y, dim=-1)", "Reserved. 
# # Licensed under the Apache License, Version 2.0", "import Positionwise from ..prep.config.transfo_xl import PreTrained log = logging.get_logger(__name__) class", "n, h = cfg.d_model, cfg.n_heads, cfg.d_head cfg.scale = 1 /", "head_proj) y = x.new_empty((head_logit.size(0), self.s_vocab)) head_logprob = F.log_softmax(head_logit, dim=1) cutoff_values", "x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] ) return F.log_softmax(y, dim=-1) else: ws,", "# if bias is not None: # logit = logit", "= qc.Linear(m, 3 * n * h, bias=False) self.r_net =", "self.qkv(self.norm(y) if cfg.pre_norm else y) r = self.r_net(r) q, k,", "class ForSeqClassifier(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model", "from ..core import utils as qu from ..core import forward", "loss = xs.view(b, tgt - 1) if labels is not", "+ self.q_bias, k)) BD = self.rel_shift(torch.einsum(\"ibnd,jnd->ijbn\", (q + self.r_bias, r)))", "self.out_projs[0] head_logit = self._compute_logit(x, head_weight, head_bias, head_proj) y = x.new_empty((head_logit.size(0),", "Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed", "indices_i, -logprob_i) else: y[offset : offset + logprob_i.size(0)].copy_(-logprob_i) offset +=", "yo.kw else ys class Projector(qc.Module): def __init__(self, s_vocab, d_embed, d_proj,", "bias return y def forward(self, x, labels=None, keep_order=False): if labels", "if proj is None: y = F.linear(x, weight, bias=bias) else:", "yo.hidden: hiddens += (y,) hiddens = tuple(x.transpose(0, 1).contiguous() for x", "= self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) cluster_prob_idx", "y.index_copy_(0, indices_i, -logprob_i) else: y[offset : offset + logprob_i.size(0)].copy_(-logprob_i) offset", "max(0, qlen) b = max(0, e - self.cfg.mem_len) with torch.no_grad():", "= (y,) + ys[1:] + (loss,) return qo.LossMems(*ys) if yo.kw", "if yo.attn else None hiddens = () if yo.hidden else", "i in range(len(self.cutoffs)): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) else:", "bs[0], self.out_projs[0] head_logit = self._compute_logit(x, head_weight, head_bias, head_proj) head_logprob =", "is None: assert x_emb is not None b, tgt =", "not None: y = -F.log_softmax(y, dim=-1).gather(1, labels.unsqueeze(1)).squeeze(1) else: y =", "0 klen = mlen + n pos = torch.arange(klen -", "None: logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1, target_i[:, None]", "self.out_projs[0] ) if labels is not None: y = -F.log_softmax(y,", "y = torch.zeros_like(labels, dtype=x.dtype, device=x.device) offset = 0 cutoff_values =", "in range(len(self.proj.out_layers)): self._tie_or_clone_weights(self.proj.out_layers[i], self.model.tok_emb.lays[i]) if cfg.tie_projs: for i, tie_proj in", "x_emb.size()[:-1] else: assert x_emb is None x = x.transpose(0, 1).contiguous()", "dec_m = (torch.triu(ones, 1 + mlen) + torch.tril(ones, -shift))[:, :,", "init_mems(self, b): cfg = self.cfg if cfg.mem_len > 0: p", "OF ANY KIND, either express or implied. 
# See the", "3, 0, 1).contiguous() for x in attns) if yo.hidden: hiddens", "= qc.Stack() for _ in range(cfg.n_lays): self.lays.append(Layer(q_bias=q_bias, r_bias=r_bias, **kw)) self.drop", "x = F.linear(x, proj.t().contiguous()) y = F.linear(x, weight, bias=bias) #", "See the License for the specific language governing permissions and", "to in writing, software # distributed under the License is", "(labels >= l_idx) & (labels < r_idx) indices_i = mask_i.nonzero().squeeze()", "= cutoff_values[i], cutoff_values[i + 1] if i == 0: y[:,", "(y,) + ys[1:] + (loss,) return qo.LossMems(*ys) if yo.kw else", "or agreed to in writing, software # distributed under the", "cfg = self.cfg yo = self.get_y_opts(**kw) if x is None:", "tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) cluster_prob_idx = self.cutoffs[0] + i -", "hiddens) y = y.transpose(0, 1).contiguous() ys = (y, attns, hiddens,", "= Projector( cfg.s_vocab, cfg.d_embed, cfg.d_model, cfg.cutoffs, div_val=cfg.div_val, **kw ) def", "**kw) self.norm = qc.LayerNorm(m, **kw) def rel_shift(self, x, zero_triu=False): s", "= self.cfg if cfg.mem_len > 0: p = next(self.parameters()) kw", "() if yo.hidden else None head_m = self.get_head_m2(head_m, cfg.n_lays) for", "= y[1:].view_as(x) if zero_triu: ones = torch.ones((y.size(0), y.size(1))) y =", "Positionwise from ..prep.config.transfo_xl import PreTrained log = logging.get_logger(__name__) class Model(PreTrained):", "d_emb_i = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) self.out_layers.append(qc.Linear(d_emb_i, r_idx -", "if i == 0: if labels is not None: logprob_i", "r_bias = None else: q_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head)) r_bias =", "self.shortlist_size + self.n_clusters if self.n_clusters > 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters,", "= self.shortlist_size + self.n_clusters if self.n_clusters > 0: self.cluster_weight =", "not None: logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: y[:, :", "bias_i, proj_i = ws[i], bs[i], self.out_projs[i] tail_logit_i = self._compute_logit(hidden_i, weight_i,", "compliance with the License. # You may obtain a copy", "All Rights Reserved. 
# # Licensed under the Apache License,", "self.model.tok_emb.projs[0] elif tie_proj and cfg.div_val != 1: if cfg.torchscript: self.proj.out_projs[i]", "= self.cutoffs[0] + i - 1 if labels is not", "y * torch.tril(ones, y.size(1) - y.size(0))[:, :, None, None] return", "= self._compute_logit(x, head_weight, head_bias, head_proj) y = x.new_empty((head_logit.size(0), self.s_vocab)) head_logprob", "elif mask.dim() == 3: a = a.float().masked_fill(mask[:, :, :, None],", "head_m=head_m[i], mems=m, yo=yo) y = ys[0] if yo.attn: attns +=", "self.ff = Positionwise(**kw) def forward(self, x, r, dec_m=None, **kw): ys", "cfg.d_head)) r_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head)) self.lays = qc.Stack() for _", "self.model = Model(**kw) self.proj = qc.Linear(cfg.d_embed, cfg.n_labels, bias=False, **kw) forward", "e = mlen + max(0, qlen) b = max(0, e", "x is None: x_emb = x_emb.transpose(0, 1).contiguous() s = x_emb.size()[:-1]", "**kw) cfg = self.get_cfg(kw) m, n, h = cfg.d_model, cfg.n_heads,", "cfg.n_heads, cfg.d_head cfg.scale = 1 / (h**0.5) self.qkv = qc.Linear(m,", "dec_m=dec_m, head_m=head_m[i], mems=m, yo=yo) y = ys[0] if yo.attn: attns", "cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) -", "not use this file except in compliance with the License.", "enumerate(self.lays): if yo.hidden: hiddens += (y,) m = None if", "if yo.hidden: hiddens += (y,) m = None if mems", "head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1) if labels is None:", "def __init__(self, **kw): super().__init__() self.attn = Attention(**kw) self.ff = Positionwise(**kw)", "== 0: continue target_i = labels.index_select(0, indices_i) - l_idx head_logprob_i", "you may not use this file except in compliance with", "x, r, dec_m=None, **kw): ys = self.attn(x, r, mask=dec_m, **kw)", "else: weight_i = self.out_layers[i].weight bias_i = self.out_layers[i].bias if i ==", "as qf from ..core import output as qo from ..core.embed", "is None else torch.cat([mems, x], 0) y = self.qkv(self.norm(y) if", "qo.WithMems(*ys) if yo.kw else ys class ForSeqClassifier(PreTrained): def __init__(self, **kw):", "**kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(**kw) self.proj =", "self.model.tok_emb.projs[i] def init_mems(self, bsz): return self.model.init_mems(bsz) def forward(self, x, x_emb=None,", "_compute_logit(self, x, weight, bias, proj): if proj is None: y", "is not None: x = x[..., :-1, :].contiguous() labels =", "x.size(1) ys = self.model(x, x_emb=x_emb, **kw, yo=yo) xs = self.proj(ys[0][:,", "cfg.n_lays) for i, lay in enumerate(self.lays): if yo.hidden: hiddens +=", "mask is not None and torch.sum(mask).item(): mask = mask ==", "div_val=cfg.div_val, **kw) self.pos_emb = Positional(cfg.d_model, **kw) if cfg.untie_r: q_bias =", "x.view(-1, x.size(-1)) labels = labels.view(-1) assert x.size(0) == labels.size(0) else:", "- cfg.mem_len shift = n - d if d >", "hs=[], **kw): super().__init__(ps, [self.hs] + hs, **kw) cfg = self.get_cfg(kw)", "x is not None else x_emb.shape[:2])[0] if cfg.PAD is None:", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "__init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.tok_emb = Adaptive(cfg.cutoffs, div_val=cfg.div_val,", "= d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) self.out_layers.append(qc.Linear(d_emb_i, r_idx - l_idx))", "**kw): super().__init__() self.attn = Attention(**kw) self.ff = Positionwise(**kw) def forward(self,", "l_idx, r_idx = 
self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i = self.out_layers[0].weight[l_idx:r_idx]", "self.cutoff_ends[i + 1] weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else:", "LLMHead(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model =", "= v.view(klen, b, n, h) r = r.view(rlen, n, h)", "__init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(**kw) self.proj", "is None: mems = self.init_mems(b) mlen = mems[0].size(0) if mems", "None] y = self.drop(y) attns = () if yo.attn else", "= r_bias self.drop = qc.Dropout(cfg.drop, **kw) self.drop_attn = qc.Dropout(cfg.drop_attn, **kw)", "= x if mems is None else torch.cat([mems, x], 0)", "[self.hs] + hs, **kw) cfg = self.get_cfg(kw) m, n, h", "Layer(qc.Module): def __init__(self, **kw): super().__init__() self.attn = Attention(**kw) self.ff =", "(q + self.q_bias, k)) BD = self.rel_shift(torch.einsum(\"ibnd,jnd->ijbn\", (q + self.r_bias,", "x_emb is None else x_emb n, b = s if", "the License. # ============================================================================= # https://arxiv.org/abs/1901.02860 # https://github.com/kimiyoung/transformer-xl import torch", "self.get_y_opts(**kw) if x is None: x_emb = x_emb.transpose(0, 1).contiguous() s", "None hiddens = () if yo.hidden else None head_m =", "None else self.update_mems(hiddens, mems, mlen, n) if yo.attn: attns =", "if cfg.same_length: d = klen - cfg.mem_len shift = n", "Attention(qc.Module): hs = qc.Hypers( {\"d_head\", \"d_model\", \"drop\", \"n_heads\"}, {\"drop_attn\": 0.0,", "n, h) r = r.view(rlen, n, h) AC = torch.einsum(\"ibnd,jbnd->ijbn\",", "x_emb is not None b, tgt = x_emb.size(0), x_emb.size(1) else:", "labels is not None: if (hasattr(self, \"keep_order\") and self.keep_order) or", "y = F.linear(x, weight, bias=bias) else: # if CUDA_MAJOR <=", "lay(y, pos, **kw, dec_m=dec_m, head_m=head_m[i], mems=m, yo=yo) y = ys[0]", "torch from torch import nn from torch.nn import functional as", "0 self.proj = Projector( cfg.s_vocab, cfg.d_embed, cfg.d_model, cfg.cutoffs, div_val=cfg.div_val, **kw", "torch.einsum(\"ibnd,jbnd->ijbn\", (q + self.q_bias, k)) BD = self.rel_shift(torch.einsum(\"ibnd,jnd->ijbn\", (q +", "device=p.device) return [torch.zeros(cfg.mem_len, b, cfg.d_model, **kw) for _ in range(cfg.n_lays)]", "not None: if (hasattr(self, \"keep_order\") and self.keep_order) or keep_order: y.index_copy_(0,", "1) if labels is not None else None ys =", "/ (h**0.5) self.qkv = qc.Linear(m, 3 * n * h,", "for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i +", "1).contiguous() s = x.size() y = self.tok_emb(x) if x_emb is", "from ..core import forward as qf from ..core import output", "in range(len(xs))] def forward(self, x, mems=None, head_m=None, x_emb=None, **kw): cfg", "not None else None ys = (y,) + ys[1:] +", "mlen + n pos = torch.arange(klen - 1, -1, -1.0,", "v = v.view(klen, b, n, h) r = r.view(rlen, n,", "dim=1) cluster_prob_idx = self.cutoffs[0] + i - 1 if labels", "dim=-1) else: ws, bs = [], [] for i in", "= -F.log_softmax(y, dim=-1).gather(1, labels.unsqueeze(1)).squeeze(1) else: y = F.log_softmax(y, dim=-1) else:", "= a.float().masked_fill(mask[:, :, :, None], i).type_as(a) a = self.drop_attn(F.softmax(a, dim=1))", "forward(self, x, r, mask=None, mems=None, head_m=None, **kw): cfg = self.cfg", "labels is not None else None ys = (y,) +", "+ x.size()[2:] y = torch.zeros(s, device=x.device, dtype=x.dtype) y = torch.cat([y,", 
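# A small plain-torch sketch (hypothetical sizes, no qnarre config involved) of the
# segment-level recurrence implemented by init_mems/update_mems above: each segment's
# hidden states are appended to the cached ones, only the most recent `mem_len` steps
# are kept, and the cache is detached so gradients never flow across segment boundaries.
def _mems_recurrence_demo(mem_len=4, batch=2, d_model=8):
    mems = torch.zeros(mem_len, batch, d_model)  # what init_mems returns for one layer
    for _ in range(3):  # three consecutive segments
        seg = torch.randn(5, batch, d_model)  # qlen=5 hidden states of this layer
        with torch.no_grad():
            # equivalent to update_mems for one layer: keep only the newest mem_len steps
            mems = torch.cat([mems, seg], dim=0)[-mem_len:].detach()
    return mems  # shape: [mem_len, batch, d_model]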
"ys[1:] class Attention(qc.Module): hs = qc.Hypers( {\"d_head\", \"d_model\", \"drop\", \"n_heads\"},", "return [torch.zeros(cfg.mem_len, b, cfg.d_model, **kw) for _ in range(cfg.n_lays)] return", "= torch.cat([bias_i, self.cluster_bias], dim=0) ws.append(weight_i) bs.append(bias_i) head_weight, head_bias, head_proj =", "head_logprob[:, : self.cutoffs[0]] else: weight_i, bias_i, proj_i = ws[i], bs[i],", "return qo.WithMems(*ys) if yo.kw else ys class ForSeqClassifier(PreTrained): def __init__(self,", "= Model(**kw) assert cfg.sample_softmax <= 0 self.proj = Projector( cfg.s_vocab,", "(q + self.r_bias, r))) a = AC + BD a.mul_(cfg.scale)", "x_emb.size(0), x_emb.size(1) else: b, tgt = x.size(0), x.size(1) ys =", "== 0: weight_i = torch.cat([weight_i, self.cluster_weight], dim=0) bias_i = torch.cat([bias_i,", "b = (x.shape[:2] if x is not None else x_emb.shape[:2])[0]", "self.update_mems(hiddens, mems, mlen, n) if yo.attn: attns = tuple(x.permute(2, 3,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "bias=bias) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))", "1): beg_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1] if i", "x_emb.shape[:2])[0] if cfg.PAD is None: n = -1 else: assert", "= n - d if d > 0 else n", "!= d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) else: self.out_projs.append(None) self.out_layers.append(qc.Linear(d_embed, s_vocab)) else: for", "mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue target_i = labels.index_select(0, indices_i)", "**kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.tok_emb = Adaptive(cfg.cutoffs, div_val=cfg.div_val, **kw)", "nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head)) r_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head)) self.lays = qc.Stack() for", "if labels is None: y = x.new_empty((head_logit.size(0), self.s_vocab)) else: y", ": self.cutoffs[0]] else: weight_i, bias_i, proj_i = ws[i], bs[i], self.out_projs[i]", "- 1): beg_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1] if", "= (x.shape[:2] if x is not None else x_emb.shape[:2])[0] if", "and cfg.div_val != 1: if cfg.torchscript: self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[i].clone()) else:", "[s_vocab] self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size", "file except in compliance with the License. 
# You may", "if i == 0: y[:, : self.cutoffs[0]] = head_logprob[:, :", "y = torch.cat([y, x], dim=1) s = (x.size(1) + 1,", "init_mems(self, bsz): return self.model.init_mems(bsz) def forward(self, x, x_emb=None, labels=None, **kw):", "cutoff_values[i + 1] if labels is not None: mask_i =", "e - self.cfg.mem_len) with torch.no_grad(): return [torch.cat([ys[i], xs[i]], dim=0)[b:e].detach() for", ") def __init__(self, r_bias=None, q_bias=None, ps={}, hs=[], **kw): super().__init__(ps, [self.hs]", "dec_m = torch.triu(ones, diagonal=1 + mlen)[:, :, None] y =", "\"drop\", \"n_heads\"}, {\"drop_attn\": 0.0, \"eps\": 1e-5, \"pre_norm\": False}, ) def", "labels.index_select(0, indices_i) - l_idx head_logprob_i = head_logprob.index_select(0, indices_i) hidden_i =", "x], dim=1) s = (x.size(1) + 1, x.size(0)) + x.size()[2:]", "head_weight, head_bias, head_proj) y = x.new_empty((head_logit.size(0), self.s_vocab)) head_logprob = F.log_softmax(head_logit,", "= self.cfg if cfg.tie_word_embeds: for i in range(len(self.proj.out_layers)): self._tie_or_clone_weights(self.proj.out_layers[i], self.model.tok_emb.lays[i])", "-1.0, device=y.device, dtype=y.dtype) if cfg.clamp_len > 0: pos.clamp_(max=cfg.clamp_len) pos =", "- d if d > 0 else n dec_m =", "for i, tie_proj in enumerate(cfg.tie_projs): if tie_proj and cfg.div_val ==", "y = self._compute_logit( x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] ) if labels", "= labels.index_select(0, indices_i) - l_idx head_logprob_i = head_logprob.index_select(0, indices_i) hidden_i", "bias=bias) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <=", "dim=1)) if head_m is not None: a = a *", "x.size() y = self.tok_emb(x) if x_emb is None else x_emb", "None else x_emb n, b = s if mems is", "KIND, either express or implied. 
# See the License for", "__init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(**kw) assert", "if bias is not None: # logit = logit +", "assert cfg.sample_softmax <= 0 self.proj = Projector( cfg.s_vocab, cfg.d_embed, cfg.d_model,", "0.0, \"eps\": 1e-5, \"pre_norm\": False}, ) def __init__(self, r_bias=None, q_bias=None,", "else: y = torch.zeros_like(labels, dtype=x.dtype, device=x.device) offset = 0 cutoff_values", "self.s_vocab = s_vocab self.d_embed = d_embed self.d_proj = d_proj self.cutoffs", "= None else: q_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head)) r_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads,", "if cfg.torchscript: self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[0].clone()) else: self.proj.out_projs[i] = self.model.tok_emb.projs[0] elif", "assert x.size(0) == labels.size(0) else: x = x.view(-1, x.size(-1)) if", "cfg.d_model, cfg.cutoffs, div_val=cfg.div_val, **kw ) def tie_weights(self): cfg = self.cfg", "(the \"License\"); # you may not use this file except", "if self.n_clusters > 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias =", "dtype=x.dtype) y = torch.cat([y, x], dim=1) s = (x.size(1) +", "else: logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i y[:, l_idx:r_idx]", "else: x = x.view(-1, x.size(-1)) if self.n_clusters == 0: y", "self.init_mems(b) mlen = mems[0].size(0) if mems is not None else", "cluster_prob_idx, None] + tail_logprob_i y[:, l_idx:r_idx] = logprob_i if labels", "self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs)", "is None else mems[i] ys = lay(y, pos, **kw, dec_m=dec_m,", "1) + x.size()[2:] y = torch.zeros(s, device=x.device, dtype=x.dtype) y =", "() if yo.attn else None hiddens = () if yo.hidden", "# # Unless required by applicable law or agreed to", "self._compute_logit(x, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i =", "else q[-qlen:] b, n, h = x.size(1), cfg.n_heads, cfg.d_head q", "torch.zeros(s, device=x.device, dtype=x.dtype) y = torch.cat([y, x], dim=1) s =", "self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) -", "= Adaptive(cfg.cutoffs, div_val=cfg.div_val, **kw) self.pos_emb = Positional(cfg.d_model, **kw) if cfg.untie_r:", "dim=1) cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values)", "self.head_size = self.shortlist_size + self.n_clusters if self.n_clusters > 0: self.cluster_weight", "+ self.cutoffs for i in range(len(cutoff_values) - 1): beg_idx, stop_idx", "None: n = -1 else: assert b == 1 n", "self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) cluster_prob_idx =", "* n * h, bias=False) self.r_net = qc.Linear(m, n *", "implied. 
# See the License for the specific language governing", "if cfg.untie_r: q_bias = None r_bias = None else: q_bias", "is None else self.update_mems(hiddens, mems, mlen, n) if yo.attn: attns", "cfg.n_labels, bias=False, **kw) forward = qf.forward_seq def post_proj(self, x): cfg", "proj is None: y = F.linear(x, weight, bias=bias) else: #", "= cfg.d_model, cfg.n_heads, cfg.d_head cfg.scale = 1 / (h**0.5) self.qkv", "1): l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1] if labels", "0, 1).contiguous() for x in attns) if yo.hidden: hiddens +=", "self.tok_emb = Adaptive(cfg.cutoffs, div_val=cfg.div_val, **kw) self.pos_emb = Positional(cfg.d_model, **kw) if", "yo.hidden else None head_m = self.get_head_m2(head_m, cfg.n_lays) for i, lay", "x.size()[2:] y = torch.zeros(s, device=x.device, dtype=x.dtype) y = torch.cat([y, x],", "+ ys[1:] class Attention(qc.Module): hs = qc.Hypers( {\"d_head\", \"d_model\", \"drop\",", "n) if yo.attn: attns = tuple(x.permute(2, 3, 0, 1).contiguous() for", "None] else: dec_m = torch.triu(ones, diagonal=1 + mlen)[:, :, None]", "== len(ys) e = mlen + max(0, qlen) b =", "= nn.Parameter(torch.FloatTensor(n, h)) self.r_bias = nn.Parameter(torch.FloatTensor(n, h)) else: self.q_bias =", "y = x.new_empty((head_logit.size(0), self.s_vocab)) else: y = torch.zeros_like(labels, dtype=x.dtype, device=x.device)", "else: self.out_projs.append(None) self.out_layers.append(qc.Linear(d_embed, s_vocab)) else: for i in range(len(self.cutoffs)): l_idx,", "x = x[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() x", "self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx]", "self.proj.out_projs[i] = self.model.tok_emb.projs[i] def init_mems(self, bsz): return self.model.init_mems(bsz) def forward(self,", "+= (y,) hiddens = tuple(x.transpose(0, 1).contiguous() for x in hiddens)", "[], [] for i in range(len(self.cutoffs)): if self.div_val == 1:", "= Positional(cfg.d_model, **kw) if cfg.untie_r: q_bias = None r_bias =", "cfg.d_head q = q.view(qlen, b, n, h) k = k.view(klen,", "not None: mask_i = (labels >= l_idx) & (labels <", "-F.log_softmax(y, dim=-1).gather(1, labels.unsqueeze(1)).squeeze(1) else: y = F.log_softmax(y, dim=-1) else: ws,", "head_m = self.get_head_m2(head_m, cfg.n_lays) for i, lay in enumerate(self.lays): if", "1] d_emb_i = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) self.out_layers.append(qc.Linear(d_emb_i, r_idx", "for i in range(len(cutoff_values) - 1): beg_idx, stop_idx = cutoff_values[i],", "1, -1, -1.0, device=y.device, dtype=y.dtype) if cfg.clamp_len > 0: pos.clamp_(max=cfg.clamp_len)", "yo.kw else ys class ForSeqClassifier(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg", "r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i", "License. 
# ============================================================================= # https://arxiv.org/abs/1901.02860 # https://github.com/kimiyoung/transformer-xl import torch from", "Unless required by applicable law or agreed to in writing,", "# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias", "output as qo from ..core.embed import Adaptive, Positional from ..core.ffnet", "if cfg.pre_norm else (self.norm(y),) if yo.attn: ys += (a,) return", "b, tgt = x.size(0), x.size(1) ys = self.model(x, x_emb=x_emb, **kw,", "= torch.einsum(\"ijbn,jbnd->ibnd\", (a, v)) y = y.contiguous().view(y.size(0), y.size(1), n *", "= logit + bias return y def forward(self, x, labels=None,", "ys = self.attn(x, r, mask=dec_m, **kw) return (self.ff(ys[0]),) + ys[1:]", "the specific language governing permissions and # limitations under the", "pos.clamp_(max=cfg.clamp_len) pos = self.drop(self.pos_emb(pos)) ones = y.new_ones((n, klen), dtype=torch.uint8) if", "+ mlen)[:, :, None] y = self.drop(y) attns = ()", "weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob[:,", "= r.view(rlen, n, h) AC = torch.einsum(\"ibnd,jbnd->ijbn\", (q + self.q_bias,", "import utils as qu from ..core import forward as qf", "= torch.triu(ones, diagonal=1 + mlen)[:, :, None] y = self.drop(y)", "n, h) AC = torch.einsum(\"ibnd,jbnd->ijbn\", (q + self.q_bias, k)) BD", "https://arxiv.org/abs/1901.02860 # https://github.com/kimiyoung/transformer-xl import torch from torch import nn from", "from ..prep.config.transfo_xl import PreTrained log = logging.get_logger(__name__) class Model(PreTrained): def", "= dict(dtype=p.dtype, device=p.device) return [torch.zeros(cfg.mem_len, b, cfg.d_model, **kw) for _", "class Projector(qc.Module): def __init__(self, s_vocab, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):", "y = torch.einsum(\"ijbn,jbnd->ibnd\", (a, v)) y = y.contiguous().view(y.size(0), y.size(1), n", "is not None: if (hasattr(self, \"keep_order\") and self.keep_order) or keep_order:", "head_weight, head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1) if labels is", "from torch import nn from torch.nn import functional as F", "self.qkv = qc.Linear(m, 3 * n * h, bias=False) self.r_net", "yo.hidden: hiddens += (y,) m = None if mems is", "head_m y = torch.einsum(\"ijbn,jbnd->ibnd\", (a, v)) y = y.contiguous().view(y.size(0), y.size(1),", "n, h = x.size(1), cfg.n_heads, cfg.d_head q = q.view(qlen, b,", "= head_logprob.index_select(0, indices_i) hidden_i = x.index_select(0, indices_i) else: hidden_i =", "n - d if d > 0 else n dec_m", "super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(**kw) assert cfg.sample_softmax <=", "x_emb n, b = s if mems is None: mems", "r))) a = AC + BD a.mul_(cfg.scale) if mask is", "if mems is not None else 0 klen = mlen", "None] + tail_logprob_i y[:, l_idx:r_idx] = logprob_i if labels is", "in range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx = self.cutoff_ends[i],", "cfg = self.cfg b = (x.shape[:2] if x is not", "# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: x", "s if mems is None: mems = self.init_mems(b) mlen =", "d_emb_i))) self.out_layers.append(qc.Linear(d_emb_i, r_idx - l_idx)) self.keep_order = keep_order def _compute_logit(self,", "s_vocab)) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i],", "= -1 if x is None else torch.ne(x, cfg.PAD).sum(-1) -", "-i] + tail_logprob_i y[:, beg_idx, stop_idx] = logprob_i return y", "d_embed // (div_val**i) 
self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) self.out_layers.append(qc.Linear(d_emb_i, r_idx - l_idx)) self.keep_order", "= head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: y[:, : self.cutoffs[0]] = head_logprob[:,", "d_embed self.d_proj = d_proj self.cutoffs = cutoffs + [s_vocab] self.cutoff_ends", "h = x.size(1), cfg.n_heads, cfg.d_head q = q.view(qlen, b, n,", "> 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) self.out_layers", "as qu from ..core import forward as qf from ..core", "= mems[0].size(0) if mems is not None else 0 klen", "h)) self.r_bias = nn.Parameter(torch.FloatTensor(n, h)) else: self.q_bias = q_bias self.r_bias", "s = x.size() y = self.tok_emb(x) if x_emb is None", "cfg.d_head cfg.scale = 1 / (h**0.5) self.qkv = qc.Linear(m, 3", "self.q_bias = q_bias self.r_bias = r_bias self.drop = qc.Dropout(cfg.drop, **kw)", "assert x_emb is None x = x.transpose(0, 1).contiguous() s =", "y def log_prob(self, x): if self.n_clusters == 0: y =", "if yo.kw else ys class Projector(qc.Module): def __init__(self, s_vocab, d_embed,", "zero_triu: ones = torch.ones((y.size(0), y.size(1))) y = y * torch.tril(ones,", "mlen, qlen): assert len(xs) == len(ys) e = mlen +", "self.d_proj = d_proj self.cutoffs = cutoffs + [s_vocab] self.cutoff_ends =", "= self.get_cfg(kw) self.model = Model(**kw) self.proj = qc.Linear(cfg.d_embed, cfg.n_labels, bias=False,", "= q if mems is None else q[-qlen:] b, n,", "= 0 cutoff_values = [0] + self.cutoffs for i in", "(x.size(1) + 1, x.size(0)) + x.size()[2:] y = y.view(*s) y", "qlen): assert len(xs) == len(ys) e = mlen + max(0,", "self.get_minus_inf() if mask.dim() == 2: a = a.float().masked_fill(mask[None, :, :,", "> 0 else n dec_m = (torch.triu(ones, 1 + mlen)", "y = ys[0] if yo.attn: attns += (ys[1],) y =", "mems = None if mems is None else self.update_mems(hiddens, mems,", "if head_m is not None: a = a * head_m", "= nn.Parameter(self.model.tok_emb.projs[0].clone()) else: self.proj.out_projs[i] = self.model.tok_emb.projs[0] elif tie_proj and cfg.div_val", "+ [s_vocab] self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val", "functional as F from transformers.utils import logging from .. 
import", "self.proj = qc.Linear(n * h, m, bias=False, **kw) self.norm =", "tie_proj in enumerate(cfg.tie_projs): if tie_proj and cfg.div_val == 1 and", "def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.tok_emb = Adaptive(cfg.cutoffs,", "-1 if x is None else torch.ne(x, cfg.PAD).sum(-1) - 1", "= qf.forward_seq def post_proj(self, x): cfg = self.cfg b =", "= x.view(-1, x.size(-1)) labels = labels.view(-1) assert x.size(0) == labels.size(0)", "None]).squeeze(1) else: y[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]] else:", "{\"d_head\", \"d_model\", \"drop\", \"n_heads\"}, {\"drop_attn\": 0.0, \"eps\": 1e-5, \"pre_norm\": False},", "cfg.PAD is None: n = -1 else: assert b ==", "x is None else torch.ne(x, cfg.PAD).sum(-1) - 1 return x[torch.arange(b,", "# https://github.com/kimiyoung/transformer-xl import torch from torch import nn from torch.nn", "is None or q_bias is None: self.q_bias = nn.Parameter(torch.FloatTensor(n, h))", "> 0: p = next(self.parameters()) kw = dict(dtype=p.dtype, device=p.device) return", "You may obtain a copy of the License at #", "x = x.transpose(0, 1).contiguous() s = x.size() y = self.tok_emb(x)", ").squeeze(1) else: logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i y[:,", "def forward(self, x, r, dec_m=None, **kw): ys = self.attn(x, r,", "self.get_cfg(kw) m, n, h = cfg.d_model, cfg.n_heads, cfg.d_head cfg.scale =", "qc.Linear(m, n * h, bias=False) if r_bias is None or", "klen), dtype=torch.uint8) if cfg.same_length: d = klen - cfg.mem_len shift", "labels.unsqueeze(1)).squeeze(1) else: y = F.log_softmax(y, dim=-1) else: ws, bs =", ":, None, None] return y def forward(self, x, r, mask=None,", "r_bias=None, q_bias=None, ps={}, hs=[], **kw): super().__init__(ps, [self.hs] + hs, **kw)", "- 1 if labels is not None: logprob_i = head_logprob_i[:,", "self.out_layers.append(qc.Linear(d_emb_i, r_idx - l_idx)) self.keep_order = keep_order def _compute_logit(self, x,", "x.size(-1)) if self.n_clusters == 0: y = self._compute_logit( x, self.out_layers[0].weight,", "update_mems(self, xs, ys, mlen, qlen): assert len(xs) == len(ys) e", "attns, hiddens, mems) return qo.WithMems(*ys) if yo.kw else ys class", "= torch.chunk(a, 3, dim=-1) qlen, klen, rlen = x.size(0), k.size(0),", ".. 
import core as qc from ..core import utils as", "elif tie_proj and cfg.div_val != 1: if cfg.torchscript: self.proj.out_projs[i] =", "None: y = F.linear(x, weight, bias=bias) else: # if CUDA_MAJOR", "cutoff_values[i], cutoff_values[i + 1] if i == 0: y[:, :", "Positionwise(**kw) def forward(self, x, r, dec_m=None, **kw): ys = self.attn(x,", "self.out_layers[0].bias, self.out_projs[0] ) if labels is not None: y =", "= self._compute_logit(x, head_weight, head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1) if", "= -1 else: assert b == 1 n = -1", "**kw): cfg = self.cfg yo = self.get_y_opts(**kw) y = x", "AC + BD a.mul_(cfg.scale) if mask is not None and", "tie_weights(self): cfg = self.cfg if cfg.tie_word_embeds: for i in range(len(self.proj.out_layers)):", "1 + mlen) + torch.tril(ones, -shift))[:, :, None] else: dec_m", "forward(self, x, mems=None, head_m=None, x_emb=None, **kw): cfg = self.cfg yo", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "+ 1, x.size(0)) + x.size()[2:] y = y.view(*s) y =", "is not None and torch.sum(mask).item(): mask = mask == 1", "None or q_bias is None: self.q_bias = nn.Parameter(torch.FloatTensor(n, h)) self.r_bias", "torch.ones((y.size(0), y.size(1))) y = y * torch.tril(ones, y.size(1) - y.size(0))[:,", "**kw) forward = qf.forward_seq def post_proj(self, x): cfg = self.cfg", "**kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(**kw) assert cfg.sample_softmax", "mems=None, head_m=None, x_emb=None, **kw): cfg = self.cfg yo = self.get_y_opts(**kw)", "self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[i].clone()) else: self.proj.out_projs[i] = self.model.tok_emb.projs[i] def init_mems(self, bsz):", "ys = (y,) + ys[1:] + (loss,) return qo.LossMems(*ys) if", "self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else: weight_i = self.out_layers[i].weight bias_i =", "y = self._compute_logit( x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] ) return F.log_softmax(y,", "**kw) self.pos_emb = Positional(cfg.d_model, **kw) if cfg.untie_r: q_bias = None", "y = y.view(*s) y = y[1:].view_as(x) if zero_triu: ones =", "return qo.LossMems(*ys) if yo.kw else ys class Projector(qc.Module): def __init__(self,", "torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None:", "= self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else: weight_i = self.out_layers[i].weight bias_i", "klen = mlen + n pos = torch.arange(klen - 1,", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "y = self.tok_emb(x) if x_emb is None else x_emb n,", "License. # You may obtain a copy of the License", "self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) self.out_layers = qc.Stack()", "nn.Parameter(self.model.tok_emb.projs[i].clone()) else: self.proj.out_projs[i] = self.model.tok_emb.projs[i] def init_mems(self, bsz): return self.model.init_mems(bsz)", "# limitations under the License. 
# ============================================================================= # https://arxiv.org/abs/1901.02860 #", "-1 else: assert b == 1 n = -1 if", "if x is None: x_emb = x_emb.transpose(0, 1).contiguous() s =", "y.view(*s) y = y[1:].view_as(x) if zero_triu: ones = torch.ones((y.size(0), y.size(1)))", "__init__(self, r_bias=None, q_bias=None, ps={}, hs=[], **kw): super().__init__(ps, [self.hs] + hs,", "if mems is None else self.update_mems(hiddens, mems, mlen, n) if", "torch.ne(x, cfg.PAD).sum(-1) - 1 return x[torch.arange(b, device=self.device), n] class LLMHead(PreTrained):", "Quantapix Authors. All Rights Reserved. # # Licensed under the", "(loss,) return qo.LossMems(*ys) if yo.kw else ys class Projector(qc.Module): def", "+ hs, **kw) cfg = self.get_cfg(kw) m, n, h =", ":, None], i).type_as(a) a = self.drop_attn(F.softmax(a, dim=1)) if head_m is", "labels = labels[..., 1:].contiguous() x = x.view(-1, x.size(-1)) labels =", "and # limitations under the License. # ============================================================================= # https://arxiv.org/abs/1901.02860", "len(ys) e = mlen + max(0, qlen) b = max(0,", "self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size =", "else: dec_m = torch.triu(ones, diagonal=1 + mlen)[:, :, None] y", "import forward as qf from ..core import output as qo", "y = self.drop(y) attns = () if yo.attn else None", "== 3: a = a.float().masked_fill(mask[:, :, :, None], i).type_as(a) a", "self.drop_attn = qc.Dropout(cfg.drop_attn, **kw) self.proj = qc.Linear(n * h, m,", "x.view(-1, x.size(-1)) if self.n_clusters == 0: y = self._compute_logit( x,", "in range(len(self.cutoffs)): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) else: self.out_projs.append(None)", "div_val=cfg.div_val, **kw ) def tie_weights(self): cfg = self.cfg if cfg.tie_word_embeds:", "diagonal=1 + mlen)[:, :, None] y = self.drop(y) attns =", "3 * n * h, bias=False) self.r_net = qc.Linear(m, n", "b = max(0, e - self.cfg.mem_len) with torch.no_grad(): return [torch.cat([ys[i],", "r_idx - l_idx)) self.keep_order = keep_order def _compute_logit(self, x, weight,", "(x.size(0), 1) + x.size()[2:] y = torch.zeros(s, device=x.device, dtype=x.dtype) y", "* h, bias=False) if r_bias is None or q_bias is", "i == 0: if labels is not None: logprob_i =", "return F.log_softmax(y, dim=-1) else: ws, bs = [], [] for", "tgt - 1) if labels is not None else None", "range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i", "ys[1:] + (loss,) return qo.LossMems(*ys) if yo.kw else ys class", "forward = qf.forward_seq def post_proj(self, x): cfg = self.cfg b", "0: y = self._compute_logit( x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] ) return", ": offset + logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return y def", "Adaptive, Positional from ..core.ffnet import Positionwise from ..prep.config.transfo_xl import PreTrained", "i == 0: y[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]", "self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[0].clone()) else: self.proj.out_projs[i] = self.model.tok_emb.projs[0] elif tie_proj and", "**kw, dec_m=dec_m, head_m=head_m[i], mems=m, yo=yo) y = ys[0] if yo.attn:", "0: y = self._compute_logit( x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] ) if", "AC = torch.einsum(\"ibnd,jbnd->ijbn\", (q + self.q_bias, k)) BD = 
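# A toy walk-through (hypothetical numbers, independent of any real vocabulary) of the
# cutoff bookkeeping Projector uses above: with cutoffs=[3, 6] and s_vocab=10, tokens
# 0-2 form the head shortlist, 3-5 and 6-9 form two tail clusters, and the head softmax
# carries one extra logit per cluster to route probability mass into the tails.
def _cutoff_buckets_demo(cutoffs=(3, 6), s_vocab=10):
    cutoffs = list(cutoffs) + [s_vocab]      # mirrors self.cutoffs = cutoffs + [s_vocab]
    cutoff_ends = [0] + cutoffs              # [0, 3, 6, 10]
    n_clusters = len(cutoffs) - 1            # 2 tail clusters
    head_size = cutoffs[0] + n_clusters      # 3 shortlist tokens + 2 cluster logits
    buckets = [(cutoff_ends[i], cutoff_ends[i + 1]) for i in range(len(cutoff_ends) - 1)]
    return head_size, buckets                # (5, [(0, 3), (3, 6), (6, 10)])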
self.rel_shift(torch.einsum(\"ibnd,jnd->ijbn\",", "= self.get_cfg(kw) self.tok_emb = Adaptive(cfg.cutoffs, div_val=cfg.div_val, **kw) self.pos_emb = Positional(cfg.d_model,", "nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head)) self.lays = qc.Stack() for _ in range(cfg.n_lays): self.lays.append(Layer(q_bias=q_bias,", "torch.cat([mems, x], 0) y = self.qkv(self.norm(y) if cfg.pre_norm else y)", "False}, ) def __init__(self, r_bias=None, q_bias=None, ps={}, hs=[], **kw): super().__init__(ps,", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "and CUDA_MINOR <= 1: x = F.linear(x, proj.t().contiguous()) y =", "= None if mems is None else self.update_mems(hiddens, mems, mlen,", "_ in range(cfg.n_lays)] return None def update_mems(self, xs, ys, mlen,", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "self.cfg.mem_len) with torch.no_grad(): return [torch.cat([ys[i], xs[i]], dim=0)[b:e].detach() for i in", "= qc.Hypers( {\"d_head\", \"d_model\", \"drop\", \"n_heads\"}, {\"drop_attn\": 0.0, \"eps\": 1e-5,", "hs = qc.Hypers( {\"d_head\", \"d_model\", \"drop\", \"n_heads\"}, {\"drop_attn\": 0.0, \"eps\":", "or keep_order: y.index_copy_(0, indices_i, -logprob_i) else: y[offset : offset +", "[0] + self.cutoffs for i in range(len(cutoff_values) - 1): beg_idx,", "1] if i == 0: y[:, : self.cutoffs[0]] = head_logprob[:,", "language governing permissions and # limitations under the License. #", "q_bias self.r_bias = r_bias self.drop = qc.Dropout(cfg.drop, **kw) self.drop_attn =", "required by applicable law or agreed to in writing, software", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "= x if i == 0: if labels is not", "else: y[offset : offset + logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return", "attns = tuple(x.permute(2, 3, 0, 1).contiguous() for x in attns)", "self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size =", "head_logprob.index_select(0, indices_i) hidden_i = x.index_select(0, indices_i) else: hidden_i = x", "n = -1 if x is None else torch.ne(x, cfg.PAD).sum(-1)", "self.drop(self.pos_emb(pos)) ones = y.new_ones((n, klen), dtype=torch.uint8) if cfg.same_length: d =", "= y.contiguous().view(y.size(0), y.size(1), n * h) y = x +", "= a.float().masked_fill(mask[None, :, :, None], i).type_as(a) elif mask.dim() == 3:", "return self.model.init_mems(bsz) def forward(self, x, x_emb=None, labels=None, **kw): yo =", "9 and CUDA_MINOR <= 1: x = F.linear(x, proj.t().contiguous()) y", "agreed to in writing, software # distributed under the License", "from ..core import output as qo from ..core.embed import Adaptive,", "distributed under the License is distributed on an \"AS IS\"", "= self.get_y_opts(**kw) if x is None: assert x_emb is not", "x is None: assert x_emb is not None b, tgt", "self.cutoffs for i in range(len(cutoff_values) - 1): l_idx, r_idx =", "**kw) for _ in range(cfg.n_lays)] return None def update_mems(self, xs,", "== 1 and cfg.d_model != cfg.d_embed: if cfg.torchscript: self.proj.out_projs[i] =", "d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) else: self.out_projs.append(None) self.out_layers.append(qc.Linear(d_embed, s_vocab)) else: for i", "self.cutoffs[0] + i - 1 if labels is not None:", "**kw) return (self.ff(ys[0]),) + ys[1:] class Attention(qc.Module): hs = qc.Hypers(", "cfg.d_embed, cfg.d_model, cfg.cutoffs, div_val=cfg.div_val, **kw ) def 
tie_weights(self): cfg =", "hiddens, mems) return qo.WithMems(*ys) if yo.kw else ys class ForSeqClassifier(PreTrained):", "* h, m, bias=False, **kw) self.norm = qc.LayerNorm(m, **kw) def", "(ys[1],) y = self.drop(y) mems = None if mems is", "if mems is None: mems = self.init_mems(b) mlen = mems[0].size(0)", "1: x = F.linear(x, proj.t().contiguous()) y = F.linear(x, weight, bias=bias)", "y = F.linear(x, weight, bias=bias) # else: # logit =", "bs = [], [] for i in range(len(self.cutoffs)): if self.div_val", "self.cfg b = (x.shape[:2] if x is not None else", "r, mask=dec_m, **kw) return (self.ff(ys[0]),) + ys[1:] class Attention(qc.Module): hs", "Projector( cfg.s_vocab, cfg.d_embed, cfg.d_model, cfg.cutoffs, div_val=cfg.div_val, **kw ) def tie_weights(self):", "= a * head_m y = torch.einsum(\"ijbn,jbnd->ibnd\", (a, v)) y", "beg_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1] if i ==", "dtype=y.dtype) if cfg.clamp_len > 0: pos.clamp_(max=cfg.clamp_len) pos = self.drop(self.pos_emb(pos)) ones", "1).contiguous() s = x_emb.size()[:-1] else: assert x_emb is None x", "y.transpose(0, 1).contiguous() ys = (y, attns, hiddens, mems) return qo.WithMems(*ys)", "else: ws, bs = [], [] for i in range(len(self.cutoffs)):", "(hasattr(self, \"keep_order\") and self.keep_order) or keep_order: y.index_copy_(0, indices_i, -logprob_i) else:", "bias_i = torch.cat([bias_i, self.cluster_bias], dim=0) ws.append(weight_i) bs.append(bias_i) head_weight, head_bias, head_proj", "= x.index_select(0, indices_i) else: hidden_i = x if i ==", "keep_order def _compute_logit(self, x, weight, bias, proj): if proj is", "= next(self.parameters()) kw = dict(dtype=p.dtype, device=p.device) return [torch.zeros(cfg.mem_len, b, cfg.d_model,", "else: y = F.log_softmax(y, dim=-1) else: ws, bs = [],", "self.cluster_bias], dim=0) ws.append(weight_i) bs.append(bias_i) head_weight, head_bias, head_proj = ws[0], bs[0],", "self.q_bias, k)) BD = self.rel_shift(torch.einsum(\"ibnd,jnd->ijbn\", (q + self.r_bias, r))) a", "None else x_emb.shape[:2])[0] if cfg.PAD is None: n = -1", "== 0: if labels is not None: logprob_i = head_logprob_i.gather(1,", "self.model = Model(**kw) assert cfg.sample_softmax <= 0 self.proj = Projector(", "self.get_y_opts(**kw) if x is None: assert x_emb is not None", "range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i =", "x.size(0) == labels.size(0) else: x = x.view(-1, x.size(-1)) if self.n_clusters", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "labels is not None: logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(", "log = logging.get_logger(__name__) class Model(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg", "the License is distributed on an \"AS IS\" BASIS, #", "tuple(x.permute(2, 3, 0, 1).contiguous() for x in attns) if yo.hidden:", "bias=False) self.r_net = qc.Linear(m, n * h, bias=False) if r_bias", "cfg.d_model, **kw) for _ in range(cfg.n_lays)] return None def update_mems(self,", "x if i == 0: if labels is not None:", "torch.arange(klen - 1, -1, -1.0, device=y.device, dtype=y.dtype) if cfg.clamp_len >", "cfg.div_val == 1 and cfg.d_model != cfg.d_embed: if cfg.torchscript: self.proj.out_projs[i]", "else: y[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]] else: weight_i,", "a = a * head_m y = torch.einsum(\"ijbn,jbnd->ibnd\", (a, v))", "None: y = x.new_empty((head_logit.size(0), self.s_vocab)) else: y = torch.zeros_like(labels, dtype=x.dtype,", "y[:, l_idx:r_idx] = logprob_i if labels is not None: if", "in attns) if yo.hidden: hiddens += (y,) hiddens = tuple(x.transpose(0,", "= self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i = d_embed // (div_val**i)", "proj_i = ws[i], bs[i], self.out_projs[i] tail_logit_i = self._compute_logit(x, weight_i, bias_i,", "= x.new_empty((head_logit.size(0), self.s_vocab)) else: y = torch.zeros_like(labels, dtype=x.dtype, device=x.device) offset", "cfg.untie_r: q_bias = None r_bias = None else: q_bias =", "s_vocab, d_embed, d_proj, cutoffs, div_val=1, keep_order=False): super().__init__() self.s_vocab = s_vocab", "law or agreed to in writing, software # distributed under", "return y def forward(self, x, labels=None, keep_order=False): if labels is", "= None if mems is None else mems[i] ys =", "if x is None else torch.ne(x, cfg.PAD).sum(-1) - 1 return", "forward as qf from ..core import output as qo from", "> 0: pos.clamp_(max=cfg.clamp_len) pos = self.drop(self.pos_emb(pos)) ones = y.new_ones((n, klen),", "r, dec_m=None, **kw): ys = self.attn(x, r, mask=dec_m, **kw) return", "cfg = self.cfg yo = self.get_y_opts(**kw) y = x if", "1 / (h**0.5) self.qkv = qc.Linear(m, 3 * n *", "self.r_bias = nn.Parameter(torch.FloatTensor(n, h)) else: self.q_bias = q_bias self.r_bias =", "def forward(self, x, mems=None, head_m=None, x_emb=None, **kw): cfg = self.cfg", "self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) self.out_layers = qc.Stack() self.out_projs = nn.ParameterList()", "may obtain a copy of the License at # #", "torch.nn import functional as F from transformers.utils import logging from", "ones = torch.ones((y.size(0), y.size(1))) y = y * torch.tril(ones, y.size(1)", "= torch.zeros(s, device=x.device, dtype=x.dtype) y = torch.cat([y, x], dim=1) s", "x = x.view(-1, x.size(-1)) labels = labels.view(-1) assert x.size(0) ==", "a = a.float().masked_fill(mask[None, :, :, None], i).type_as(a) elif mask.dim() ==", "1).contiguous() ys = (y, attns, hiddens, mems) return qo.WithMems(*ys) if", "= cutoffs + [s_vocab] self.cutoff_ends = [0] + self.cutoffs self.div_val", "= qc.Dropout(cfg.drop, **kw) self.drop_attn = qc.Dropout(cfg.drop_attn, **kw) self.proj = qc.Linear(n", "ys class Projector(qc.Module): def __init__(self, s_vocab, d_embed, d_proj, cutoffs, div_val=1,", "return y def forward(self, x, r, mask=None, mems=None, head_m=None, **kw):", "may not use this file except in compliance with the", "self.get_y_opts(**kw) y = x if mems is None else torch.cat([mems,", "= self.cfg yo = self.get_y_opts(**kw) y = x if mems", 
"keep_order=False): super().__init__() self.s_vocab = s_vocab self.d_embed = d_embed self.d_proj =", "() loss = xs.view(b, tgt - 1) if labels is", "= mlen + max(0, qlen) b = max(0, e -", "this file except in compliance with the License. # You", "l_idx:r_idx] = logprob_i if labels is not None: if (hasattr(self,", "self.out_projs[0] ) return F.log_softmax(y, dim=-1) else: ws, bs = [],", "= F.log_softmax(head_logit, dim=1) cutoff_values = [0] + self.cutoffs for i", "# # Licensed under the Apache License, Version 2.0 (the", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "h, bias=False) self.r_net = qc.Linear(m, n * h, bias=False) if", "y = self.qkv(self.norm(y) if cfg.pre_norm else y) r = self.r_net(r)", "+ tail_logprob_i y[:, l_idx:r_idx] = logprob_i if labels is not", "ones = y.new_ones((n, klen), dtype=torch.uint8) if cfg.same_length: d = klen", "class Model(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.tok_emb", "-1) if labels is None else () loss = xs.view(b,", "attns) if yo.hidden: hiddens += (y,) hiddens = tuple(x.transpose(0, 1).contiguous()", "import core as qc from ..core import utils as qu", "yo = self.get_y_opts(**kw) y = x if mems is None", "= mlen + n pos = torch.arange(klen - 1, -1,", "< r_idx) indices_i = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue", "v.view(klen, b, n, h) r = r.view(rlen, n, h) AC", "qlen, klen, rlen = x.size(0), k.size(0), r.size(0) q = q", "for i in range(len(self.proj.out_layers)): self._tie_or_clone_weights(self.proj.out_layers[i], self.model.tok_emb.lays[i]) if cfg.tie_projs: for i,", "self.proj(ys[0][:, -tgt:], labels) y = xs.view(b, tgt, -1) if labels", "cfg.mem_len shift = n - d if d > 0", "PreTrained log = logging.get_logger(__name__) class Model(PreTrained): def __init__(self, **kw): super().__init__(**kw)", "labels=None, keep_order=False): if labels is not None: x = x[...,", "ys[0] if yo.attn: attns += (ys[1],) y = self.drop(y) mems", "= (x.size(0), 1) + x.size()[2:] y = torch.zeros(s, device=x.device, dtype=x.dtype)", "weight_i, bias_i, proj_i = ws[i], bs[i], self.out_projs[i] tail_logit_i = self._compute_logit(x,", "y = self.drop(y) mems = None if mems is None", "limitations under the License. # ============================================================================= # https://arxiv.org/abs/1901.02860 # https://github.com/kimiyoung/transformer-xl", "logprob_i.size(0) return y def log_prob(self, x): if self.n_clusters == 0:", "class Layer(qc.Module): def __init__(self, **kw): super().__init__() self.attn = Attention(**kw) self.ff", "or implied. 
# See the License for the specific language", "n, h) k = k.view(klen, b, n, h) v =", "b, tgt = x_emb.size(0), x_emb.size(1) else: b, tgt = x.size(0),", "head_proj) head_logprob = F.log_softmax(head_logit, dim=1) if labels is None: y", "ys = (y, attns, hiddens, mems) return qo.WithMems(*ys) if yo.kw", "else ys class ForSeqClassifier(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg =", "torch.zeros_like(labels, dtype=x.dtype, device=x.device) offset = 0 cutoff_values = [0] +", "dict(dtype=p.dtype, device=p.device) return [torch.zeros(cfg.mem_len, b, cfg.d_model, **kw) for _ in", "hiddens += (y,) hiddens = tuple(x.transpose(0, 1).contiguous() for x in", "tgt = x_emb.size(0), x_emb.size(1) else: b, tgt = x.size(0), x.size(1)", "-tgt:], labels) y = xs.view(b, tgt, -1) if labels is", "r = self.r_net(r) q, k, v = torch.chunk(a, 3, dim=-1)", "# ============================================================================= # https://arxiv.org/abs/1901.02860 # https://github.com/kimiyoung/transformer-xl import torch from torch", "cfg.pre_norm else (self.norm(y),) if yo.attn: ys += (a,) return ys", "if x_emb is None else x_emb n, b = s", "if labels is not None: logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)", "x_emb=None, **kw): cfg = self.cfg yo = self.get_y_opts(**kw) if x", "mems[0].size(0) if mems is not None else 0 klen =", "x if mems is None else torch.cat([mems, x], 0) y", "None ys = (y,) + ys[1:] + (loss,) return qo.LossMems(*ys)", "y = y.transpose(0, 1).contiguous() ys = (y, attns, hiddens, mems)", "else ys class Projector(qc.Module): def __init__(self, s_vocab, d_embed, d_proj, cutoffs,", "proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob[:, -i] +", "= () if yo.attn else None hiddens = () if", "x.new_empty((head_logit.size(0), self.s_vocab)) head_logprob = F.log_softmax(head_logit, dim=1) cutoff_values = [0] +", "self.out_projs[i] tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i,", "x_emb.transpose(0, 1).contiguous() s = x_emb.size()[:-1] else: assert x_emb is None", "q_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head)) r_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head)) self.lays =", "- 1 return x[torch.arange(b, device=self.device), n] class LLMHead(PreTrained): def __init__(self,", "= (y, attns, hiddens, mems) return qo.WithMems(*ys) if yo.kw else", "self.model.init_mems(bsz) def forward(self, x, x_emb=None, labels=None, **kw): yo = self.get_y_opts(**kw)", "if zero_triu: ones = torch.ones((y.size(0), y.size(1))) y = y *", "l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i = d_embed", "self.drop(y) mems = None if mems is None else self.update_mems(hiddens,", "utils as qu from ..core import forward as qf from", "= [0] + self.cutoffs for i in range(len(cutoff_values) - 1):", "0: weight_i = torch.cat([weight_i, self.cluster_weight], dim=0) bias_i = torch.cat([bias_i, self.cluster_bias],", "indices_i) else: hidden_i = x if i == 0: if", "\"pre_norm\": False}, ) def __init__(self, r_bias=None, q_bias=None, ps={}, hs=[], **kw):", "= self.get_y_opts(**kw) y = x if mems is None else", "y class Layer(qc.Module): def __init__(self, **kw): super().__init__() self.attn = Attention(**kw)", "a.float().masked_fill(mask[:, :, :, None], i).type_as(a) a = self.drop_attn(F.softmax(a, dim=1)) if", "x = x.view(-1, x.size(-1)) if self.n_clusters == 0: y =", "+ self.r_bias, r))) a = AC + BD a.mul_(cfg.scale) if", "+ BD 
class Model(PreTrained):
    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.tok_emb = Adaptive(cfg.cutoffs, div_val=cfg.div_val, **kw)
        self.pos_emb = Positional(cfg.d_model, **kw)
        if cfg.untie_r:
            q_bias = None
            r_bias = None
        else:
            # content/position biases shared across all layers
            q_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head))
            r_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head))
        self.lays = qc.Stack()
        for _ in range(cfg.n_lays):
            self.lays.append(Layer(q_bias=q_bias, r_bias=r_bias, **kw))
        self.drop = qc.Dropout(cfg.drop, **kw)

    def init_mems(self, b):
        cfg = self.cfg
        if cfg.mem_len > 0:
            p = next(self.parameters())
            kw = dict(dtype=p.dtype, device=p.device)
            return [torch.zeros(cfg.mem_len, b, cfg.d_model, **kw) for _ in range(cfg.n_lays)]
        return None

    def update_mems(self, xs, ys, mlen, qlen):
        assert len(xs) == len(ys)
        e = mlen + max(0, qlen)
        b = max(0, e - self.cfg.mem_len)
        with torch.no_grad():
            return [torch.cat([ys[i], xs[i]], dim=0)[b:e].detach() for i in range(len(xs))]

    def forward(self, x, mems=None, head_m=None, x_emb=None, **kw):
        cfg = self.cfg
        yo = self.get_y_opts(**kw)
        if x is None:
            x_emb = x_emb.transpose(0, 1).contiguous()
            s = x_emb.size()[:-1]
        else:
            assert x_emb is None
            x = x.transpose(0, 1).contiguous()
            s = x.size()
        y = self.tok_emb(x) if x_emb is None else x_emb
        n, b = s
        if mems is None:
            mems = self.init_mems(b)
        mlen = mems[0].size(0) if mems is not None else 0
        klen = mlen + n
        pos = torch.arange(klen - 1, -1, -1.0, device=y.device, dtype=y.dtype)
        if cfg.clamp_len > 0:
            pos.clamp_(max=cfg.clamp_len)
        pos = self.drop(self.pos_emb(pos))
        ones = y.new_ones((n, klen), dtype=torch.uint8)
        if cfg.same_length:
            d = klen - cfg.mem_len
            shift = n - d if d > 0 else n
            dec_m = (torch.triu(ones, 1 + mlen) + torch.tril(ones, -shift))[:, :, None]
        else:
            dec_m = torch.triu(ones, diagonal=1 + mlen)[:, :, None]
        y = self.drop(y)
        attns = () if yo.attn else None
        hiddens = () if yo.hidden else None
        head_m = self.get_head_m2(head_m, cfg.n_lays)
        for i, lay in enumerate(self.lays):
            if yo.hidden:
                hiddens += (y,)
            m = None if mems is None else mems[i]
            ys = lay(y, pos, **kw, dec_m=dec_m, head_m=head_m[i], mems=m, yo=yo)
            y = ys[0]
            if yo.attn:
                attns += (ys[1],)
        y = self.drop(y)
        # refreshing mems consumes the per-layer hiddens collected above, so
        # callers that carry mems forward also need hidden collection enabled
        mems = None if mems is None else self.update_mems(hiddens, mems, mlen, n)
        if yo.attn:
            attns = tuple(x.permute(2, 3, 0, 1).contiguous() for x in attns)
        if yo.hidden:
            hiddens += (y,)
            hiddens = tuple(x.transpose(0, 1).contiguous() for x in hiddens)
        y = y.transpose(0, 1).contiguous()
        ys = (y, attns, hiddens, mems)
        return qo.WithMems(*ys) if yo.kw else ys

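# A minimal sketch (illustrative only, not used by the model) of the window
# arithmetic in `Model.update_mems`: cached states and fresh hidden states are
# concatenated along time and the last `mem_len` steps are kept, detached so
# gradients never flow into past segments.
def _demo_update_mems():
    mem_len, mlen, qlen, b, d = 4, 4, 3, 2, 8
    mems = torch.zeros(mlen, b, d)
    hidden = torch.ones(qlen, b, d)
    e = mlen + max(0, qlen)  # 7: total steps after appending the new segment
    beg = max(0, e - mem_len)  # 3: evict everything older than the window
    new = torch.cat([mems, hidden], dim=0)[beg:e].detach()
    assert new.shape == (mem_len, b, d)  # one old step survives, three are new
    return new
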
class ForSeqClassifier(PreTrained):
    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = qc.Linear(cfg.d_embed, cfg.n_labels, bias=False, **kw)

    forward = qf.forward_seq

    def post_proj(self, x):
        cfg = self.cfg
        # pool the hidden state at the last non-PAD position of each row
        b = (x.shape[:2] if x is not None else x_emb.shape[:2])[0]
        if cfg.PAD is None:
            n = -1
        else:
            assert b == 1
            n = -1 if x is None else torch.ne(x, cfg.PAD).sum(-1) - 1
        return x[torch.arange(b, device=self.device), n]

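# A tiny self-contained illustration (with a hypothetical PAD id of 0) of the
# index arithmetic `post_proj` uses to find the last real token of a row.
def _demo_last_token_index():
    ids = torch.tensor([[5, 6, 7, 0, 0]])
    n = torch.ne(ids, 0).sum(-1) - 1  # -> tensor([2]), the last non-PAD slot
    return n
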
class LLMHead(PreTrained):
    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(**kw)
        assert cfg.sample_softmax <= 0
        self.proj = Projector(
            cfg.s_vocab, cfg.d_embed, cfg.d_model, cfg.cutoffs, div_val=cfg.div_val, **kw
        )

    def tie_weights(self):
        cfg = self.cfg
        if cfg.tie_word_embeds:
            for i in range(len(self.proj.out_layers)):
                self._tie_or_clone_weights(self.proj.out_layers[i], self.model.tok_emb.lays[i])
        if cfg.tie_projs:
            for i, tie_proj in enumerate(cfg.tie_projs):
                if tie_proj and cfg.div_val == 1 and cfg.d_model != cfg.d_embed:
                    if cfg.torchscript:
                        self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[0].clone())
                    else:
                        self.proj.out_projs[i] = self.model.tok_emb.projs[0]
                elif tie_proj and cfg.div_val != 1:
                    if cfg.torchscript:
                        self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[i].clone())
                    else:
                        self.proj.out_projs[i] = self.model.tok_emb.projs[i]

    def init_mems(self, bsz):
        return self.model.init_mems(bsz)

    def forward(self, x, x_emb=None, labels=None, **kw):
        yo = self.get_y_opts(**kw)
        if x is None:
            assert x_emb is not None
            b, tgt = x_emb.size(0), x_emb.size(1)
        else:
            b, tgt = x.size(0), x.size(1)
        ys = self.model(x, x_emb=x_emb, **kw, yo=yo)
        xs = self.proj(ys[0][:, -tgt:], labels)
        y = xs.view(b, tgt, -1) if labels is None else ()
        loss = xs.view(b, tgt - 1) if labels is not None else None
        ys = (y,) + ys[1:] + (loss,)
        return qo.LossMems(*ys) if yo.kw else ys

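# A minimal sketch (illustrative only; the helper name and output handling are
# assumptions, not part of the API) of driving the segment-level recurrence
# with `LLMHead`: the `mems` returned for one segment are fed back in with the
# next one, so attention can reach beyond the current segment. This assumes
# hidden-state collection is enabled in the y-options, since `update_mems`
# consumes the collected hiddens.
def _demo_recurrence(lm_head, segments):
    mems = None
    for seg in segments:
        ys = lm_head(seg, mems=mems, labels=seg)
        # tuple layout is (y, attns, hiddens, mems, loss); keyword outputs are
        # assumed to expose the same field as `.mems`
        mems = ys.mems if hasattr(ys, "mems") else ys[-2]
    return mems
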
class Layer(qc.Module):
    def __init__(self, **kw):
        super().__init__()
        self.attn = Attention(**kw)
        self.ff = Positionwise(**kw)

    def forward(self, x, r, dec_m=None, **kw):
        ys = self.attn(x, r, mask=dec_m, **kw)
        return (self.ff(ys[0]),) + ys[1:]


class Attention(qc.Module):
    hs = qc.Hypers(
        {"d_head", "d_model", "drop", "n_heads"},
        {"drop_attn": 0.0, "eps": 1e-5, "pre_norm": False},
    )

    def __init__(self, r_bias=None, q_bias=None, ps={}, hs=[], **kw):
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        m, n, h = cfg.d_model, cfg.n_heads, cfg.d_head
        cfg.scale = 1 / (h**0.5)
        self.qkv = qc.Linear(m, 3 * n * h, bias=False)
        self.r_net = qc.Linear(m, n * h, bias=False)
        if r_bias is None or q_bias is None:
            self.q_bias = nn.Parameter(torch.FloatTensor(n, h))
            self.r_bias = nn.Parameter(torch.FloatTensor(n, h))
        else:
            self.q_bias = q_bias
            self.r_bias = r_bias
        self.drop = qc.Dropout(cfg.drop, **kw)
        self.drop_attn = qc.Dropout(cfg.drop_attn, **kw)
        self.proj = qc.Linear(n * h, m, bias=False, **kw)
        self.norm = qc.LayerNorm(m, **kw)

    def rel_shift(self, x, zero_triu=False):
        # prepend a zero column along the key axis, then reinterpret the layout
        # so each query row is shifted into alignment with absolute key
        # positions: the standard Transformer-XL relative-position trick
        s = (x.size(0), 1) + x.size()[2:]
        y = torch.zeros(s, device=x.device, dtype=x.dtype)
        y = torch.cat([y, x], dim=1)
        s = (x.size(1) + 1, x.size(0)) + x.size()[2:]
        y = y.view(*s)
        y = y[1:].view_as(x)
        if zero_triu:
            ones = torch.ones((y.size(0), y.size(1)))
            y = y * torch.tril(ones, y.size(1) - y.size(0))[:, :, None, None]
        return y

    def forward(self, x, r, mask=None, mems=None, head_m=None, **kw):
        cfg = self.cfg
        yo = self.get_y_opts(**kw)
        y = x if mems is None else torch.cat([mems, x], 0)
        # the qkv projection feeds the chunk below; binding it to `a` (not `y`)
        # keeps `x` available for the residual connection further down
        a = self.qkv(self.norm(y) if cfg.pre_norm else y)
        r = self.r_net(r)
        q, k, v = torch.chunk(a, 3, dim=-1)
        qlen, klen, rlen = x.size(0), k.size(0), r.size(0)
        q = q if mems is None else q[-qlen:]
        b, n, h = x.size(1), cfg.n_heads, cfg.d_head
        q = q.view(qlen, b, n, h)
        k = k.view(klen, b, n, h)
        v = v.view(klen, b, n, h)
        r = r.view(rlen, n, h)
        # content-based (AC) and position-based (BD) score terms, combined
        # after the relative shift
        AC = torch.einsum("ibnd,jbnd->ijbn", (q + self.q_bias, k))
        BD = self.rel_shift(torch.einsum("ibnd,jnd->ijbn", (q + self.r_bias, r)))
        a = AC + BD
        a.mul_(cfg.scale)
        if mask is not None and torch.sum(mask).item():
            mask = mask == 1
            i = self.get_minus_inf()
            if mask.dim() == 2:
                a = a.float().masked_fill(mask[None, :, :, None], i).type_as(a)
            elif mask.dim() == 3:
                a = a.float().masked_fill(mask[:, :, :, None], i).type_as(a)
        a = self.drop_attn(F.softmax(a, dim=1))
        if head_m is not None:
            a = a * head_m
        y = torch.einsum("ijbn,jbnd->ibnd", (a, v))
        y = y.contiguous().view(y.size(0), y.size(1), n * h)
        y = x + self.drop(self.proj(y))
        ys = (y,) if cfg.pre_norm else (self.norm(y),)
        if yo.attn:
            ys += (a,)
        return ys

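# A minimal, self-contained sketch (illustrative only) of what `rel_shift`
# computes on a (qlen=3, rlen=3) score block: after the zero-pad and reshape,
# row i is shifted left by (qlen - 1 - i), so the "relative distance" axis
# lines up with absolute key positions; spill-over cells are junk that the
# causal mask removes afterwards.
def _demo_rel_shift():
    x = torch.arange(9.0).view(3, 3, 1, 1)  # (qlen, rlen, batch, heads)
    z = torch.zeros((3, 1, 1, 1))
    y = torch.cat([z, x], dim=1).view(4, 3, 1, 1)[1:].view_as(x)
    # rows become [2, *, *], [4, 5, *], [6, 7, 8] (* marks masked junk)
    return y[..., 0, 0]
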
class Projector(qc.Module):
    def __init__(self, s_vocab, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kw):
        # **kw absorbs framework-level kwargs forwarded by LLMHead
        super().__init__()
        self.s_vocab = s_vocab
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [s_vocab]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = qc.Stack()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(qc.Linear(d_embed, s_vocab))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(qc.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order

    def _compute_logit(self, x, weight, bias, proj):
        if proj is None:
            y = F.linear(x, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            x = F.linear(x, proj.t().contiguous())
            y = F.linear(x, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return y

    def forward(self, x, labels=None, keep_order=False):
        if labels is not None:
            # shift for next-token prediction: x[t] predicts labels[t + 1]
            x = x[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            x = x.view(-1, x.size(-1))
            labels = labels.view(-1)
            assert x.size(0) == labels.size(0)
        else:
            x = x.view(-1, x.size(-1))
        if self.n_clusters == 0:
            y = self._compute_logit(x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                y = -F.log_softmax(y, dim=-1).gather(1, labels.unsqueeze(1)).squeeze(1)
            else:
                y = F.log_softmax(y, dim=-1)
        else:
            ws, bs = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                ws.append(weight_i)
                bs.append(bias_i)
            head_weight, head_bias, head_proj = ws[0], bs[0], self.out_projs[0]
            head_logit = self._compute_logit(x, head_weight, head_bias, head_proj)
            head_logprob = F.log_softmax(head_logit, dim=1)
            if labels is None:
                y = x.new_empty((head_logit.size(0), self.s_vocab))
            else:
                y = torch.zeros_like(labels, dtype=x.dtype, device=x.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = x.index_select(0, indices_i)
                else:
                    hidden_i = x
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        y[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = ws[i], bs[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        y[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        y.index_copy_(0, indices_i, -logprob_i)
                    else:
                        y[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return y

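    # Probability bookkeeping for the two-level softmax above: a token t in
    # tail cluster i (vocabulary slice [l_idx, r_idx)) scores
    #   log p(t | x) = log p(cluster_i | x) + log p(t | cluster_i, x),
    # where the first term is head column `cutoffs[0] + i - 1` and the second
    # comes from the cluster's own softmax; shortlist tokens (ids below
    # cutoffs[0]) are scored directly by the head softmax.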
    def log_prob(self, x):
        if self.n_clusters == 0:
            y = self._compute_logit(x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return F.log_softmax(y, dim=-1)
        else:
            ws, bs = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                ws.append(weight_i)
                bs.append(bias_i)
            head_weight, head_bias, head_proj = ws[0], bs[0], self.out_projs[0]
            head_logit = self._compute_logit(x, head_weight, head_bias, head_proj)
            y = x.new_empty((head_logit.size(0), self.s_vocab))
            head_logprob = F.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                beg_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    y[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = ws[i], bs[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(x, weight_i, bias_i, proj_i)
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                    # compose the cluster log-prob (head column for cluster i)
                    # with the within-cluster log-probs, as in `forward`
                    logprob_i = head_logprob[:, self.cutoffs[0] + i - 1, None] + tail_logprob_i
                    y[:, beg_idx:stop_idx] = logprob_i
            return y
range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx", "= mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue target_i = labels.index_select(0,", "pos = torch.arange(klen - 1, -1, -1.0, device=y.device, dtype=y.dtype) if", "<= 9 and CUDA_MINOR <= 1: x = F.linear(x, proj.t().contiguous())", "= F.log_softmax(tail_logit_i, dim=1) cluster_prob_idx = self.cutoffs[0] + i - 1", "None, None] return y def forward(self, x, r, mask=None, mems=None,", "xs, ys, mlen, qlen): assert len(xs) == len(ys) e =", "i in range(len(cutoff_values) - 1): beg_idx, stop_idx = cutoff_values[i], cutoff_values[i", "1:].contiguous() x = x.view(-1, x.size(-1)) labels = labels.view(-1) assert x.size(0)", "if indices_i.numel() == 0: continue target_i = labels.index_select(0, indices_i) -", "= (torch.triu(ones, 1 + mlen) + torch.tril(ones, -shift))[:, :, None]", "+ 1] if i == 0: y[:, : self.cutoffs[0]] =", "lay in enumerate(self.lays): if yo.hidden: hiddens += (y,) m =", "**kw, yo=yo) xs = self.proj(ys[0][:, -tgt:], labels) y = xs.view(b,", "= x.size(0), x.size(1) ys = self.model(x, x_emb=x_emb, **kw, yo=yo) xs", "for i in range(len(xs))] def forward(self, x, mems=None, head_m=None, x_emb=None,", "self.out_layers[0].bias, self.out_projs[0] ) return F.log_softmax(y, dim=-1) else: ws, bs =", "dim=1) if labels is None: y = x.new_empty((head_logit.size(0), self.s_vocab)) else:", "x, zero_triu=False): s = (x.size(0), 1) + x.size()[2:] y =", "cfg.torchscript: self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[0].clone()) else: self.proj.out_projs[i] = self.model.tok_emb.projs[0] elif tie_proj", "self.q_bias = nn.Parameter(torch.FloatTensor(n, h)) self.r_bias = nn.Parameter(torch.FloatTensor(n, h)) else: self.q_bias", "self._compute_logit(x, head_weight, head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1) if labels", "and cfg.div_val == 1 and cfg.d_model != cfg.d_embed: if cfg.torchscript:", "import functional as F from transformers.utils import logging from ..", "(labels < r_idx) indices_i = mask_i.nonzero().squeeze() if indices_i.numel() == 0:", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "= x.new_empty((head_logit.size(0), self.s_vocab)) head_logprob = F.log_softmax(head_logit, dim=1) cutoff_values = [0]", "from .. 
import core as qc from ..core import utils", "assert x_emb is not None b, tgt = x_emb.size(0), x_emb.size(1)", "= x_emb.transpose(0, 1).contiguous() s = x_emb.size()[:-1] else: assert x_emb is", "cfg.d_model != cfg.d_embed: if cfg.torchscript: self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[0].clone()) else: self.proj.out_projs[i]", "weight.t())) # if bias is not None: # logit =", "y[:, beg_idx, stop_idx] = logprob_i return y class Layer(qc.Module): def", "is not None: mask_i = (labels >= l_idx) & (labels", "q = q.view(qlen, b, n, h) k = k.view(klen, b,", "+ (loss,) return qo.LossMems(*ys) if yo.kw else ys class Projector(qc.Module):", "1 i = self.get_minus_inf() if mask.dim() == 2: a =", "= nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs)):", "forward(self, x, r, dec_m=None, **kw): ys = self.attn(x, r, mask=dec_m,", "bs[i], self.out_projs[i] tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i =", "head_m=None, **kw): cfg = self.cfg yo = self.get_y_opts(**kw) y =", "self.cutoffs[0]] else: weight_i, bias_i, proj_i = ws[i], bs[i], self.out_projs[i] tail_logit_i", "if r_bias is None or q_bias is None: self.q_bias =", "governing permissions and # limitations under the License. # =============================================================================", "= head_logprob[:, : self.cutoffs[0]] else: weight_i, bias_i, proj_i = ws[i],", "bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) cluster_prob_idx = self.cutoffs[0] +", "Version 2.0 (the \"License\"); # you may not use this", "if cfg.mem_len > 0: p = next(self.parameters()) kw = dict(dtype=p.dtype,", "+ torch.tril(ones, -shift))[:, :, None] else: dec_m = torch.triu(ones, diagonal=1", "pos = self.drop(self.pos_emb(pos)) ones = y.new_ones((n, klen), dtype=torch.uint8) if cfg.same_length:", ") return F.log_softmax(y, dim=-1) else: ws, bs = [], []", "y = -F.log_softmax(y, dim=-1).gather(1, labels.unsqueeze(1)).squeeze(1) else: y = F.log_softmax(y, dim=-1)", "head_logit = self._compute_logit(x, head_weight, head_bias, head_proj) y = x.new_empty((head_logit.size(0), self.s_vocab))", "nn.Parameter(self.model.tok_emb.projs[0].clone()) else: self.proj.out_projs[i] = self.model.tok_emb.projs[0] elif tie_proj and cfg.div_val !=", "dim=1) logprob_i = head_logprob[:, -i] + tail_logprob_i y[:, beg_idx, stop_idx]", "self.out_layers[i].weight bias_i = self.out_layers[i].bias if i == 0: weight_i =", "q, k, v = torch.chunk(a, 3, dim=-1) qlen, klen, rlen", "m, bias=False, **kw) self.norm = qc.LayerNorm(m, **kw) def rel_shift(self, x,", "qf.forward_seq def post_proj(self, x): cfg = self.cfg b = (x.shape[:2]", "else 0 klen = mlen + n pos = torch.arange(klen", "def _compute_logit(self, x, weight, bias, proj): if proj is None:", "b): cfg = self.cfg if cfg.mem_len > 0: p =", "l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1] if labels is", "def tie_weights(self): cfg = self.cfg if cfg.tie_word_embeds: for i in", "by applicable law or agreed to in writing, software #", "logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1, target_i[:, None] ).squeeze(1)", "n * h) y = x + self.drop(self.proj(y)) ys =", "head_logit = self._compute_logit(x, head_weight, head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1)", "= torch.ones((y.size(0), y.size(1))) y = y * torch.tril(ones, y.size(1) -", "in enumerate(self.lays): if yo.hidden: hiddens += (y,) m = None", "s_vocab self.d_embed = d_embed self.d_proj = d_proj self.cutoffs = cutoffs", "weight, bias, 
proj): if proj is None: y = F.linear(x,", "target_i[:, None]).squeeze(1) else: y[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]", "None] return y def forward(self, x, r, mask=None, mems=None, head_m=None,", "ys = (y,) if cfg.pre_norm else (self.norm(y),) if yo.attn: ys", "= self.init_mems(b) mlen = mems[0].size(0) if mems is not None", "self.lays = qc.Stack() for _ in range(cfg.n_lays): self.lays.append(Layer(q_bias=q_bias, r_bias=r_bias, **kw))", "Adaptive(cfg.cutoffs, div_val=cfg.div_val, **kw) self.pos_emb = Positional(cfg.d_model, **kw) if cfg.untie_r: q_bias", "cfg.s_vocab, cfg.d_embed, cfg.d_model, cfg.cutoffs, div_val=cfg.div_val, **kw ) def tie_weights(self): cfg", "self.r_bias = r_bias self.drop = qc.Dropout(cfg.drop, **kw) self.drop_attn = qc.Dropout(cfg.drop_attn,", "= F.linear(x, weight, bias=bias) else: # if CUDA_MAJOR <= 9", "torch.cat([bias_i, self.cluster_bias], dim=0) ws.append(weight_i) bs.append(bias_i) head_weight, head_bias, head_proj = ws[0],", "Positional(cfg.d_model, **kw) if cfg.untie_r: q_bias = None r_bias = None", "torch import nn from torch.nn import functional as F from", "_ in range(cfg.n_lays): self.lays.append(Layer(q_bias=q_bias, r_bias=r_bias, **kw)) self.drop = qc.Dropout(cfg.drop, **kw)", "is None else torch.ne(x, cfg.PAD).sum(-1) - 1 return x[torch.arange(b, device=self.device),", "bias_i, proj_i = ws[i], bs[i], self.out_projs[i] tail_logit_i = self._compute_logit(x, weight_i,", "indices_i) hidden_i = x.index_select(0, indices_i) else: hidden_i = x if", "labels=None, **kw): yo = self.get_y_opts(**kw) if x is None: assert", "tgt = x.size(0), x.size(1) ys = self.model(x, x_emb=x_emb, **kw, yo=yo)", "= head_logprob[:, -i] + tail_logprob_i y[:, beg_idx, stop_idx] = logprob_i", "h, bias=False) if r_bias is None or q_bias is None:", "None else 0 klen = mlen + n pos =", "y = x if mems is None else torch.cat([mems, x],", "applicable law or agreed to in writing, software # distributed", "x_emb = x_emb.transpose(0, 1).contiguous() s = x_emb.size()[:-1] else: assert x_emb", "h) v = v.view(klen, b, n, h) r = r.view(rlen,", "mems is None else q[-qlen:] b, n, h = x.size(1),", "r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i = d_embed //", "= self._compute_logit( x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] ) if labels is", "cfg.same_length: d = klen - cfg.mem_len shift = n -", "= self.r_net(r) q, k, v = torch.chunk(a, 3, dim=-1) qlen,", "cfg.PAD).sum(-1) - 1 return x[torch.arange(b, device=self.device), n] class LLMHead(PreTrained): def", "= self.drop_attn(F.softmax(a, dim=1)) if head_m is not None: a =", "F.linear(x, weight, bias=bias) else: # if CUDA_MAJOR <= 9 and", "mask.dim() == 3: a = a.float().masked_fill(mask[:, :, :, None], i).type_as(a)", "= qc.Linear(cfg.d_embed, cfg.n_labels, bias=False, **kw) forward = qf.forward_seq def post_proj(self,", "= max(0, e - self.cfg.mem_len) with torch.no_grad(): return [torch.cat([ys[i], xs[i]],", "1).contiguous() for x in attns) if yo.hidden: hiddens += (y,)", "else self.update_mems(hiddens, mems, mlen, n) if yo.attn: attns = tuple(x.permute(2,", "h = cfg.d_model, cfg.n_heads, cfg.d_head cfg.scale = 1 / (h**0.5)", "ws[0], bs[0], self.out_projs[0] head_logit = self._compute_logit(x, head_weight, head_bias, head_proj) head_logprob", "n pos = torch.arange(klen - 1, -1, -1.0, device=y.device, dtype=y.dtype)", "None: mask_i = (labels >= l_idx) & (labels < r_idx)", "if i == 0: weight_i = torch.cat([weight_i, self.cluster_weight], dim=0) bias_i", "= [], [] for i in 
range(len(self.cutoffs)): if self.div_val ==", "def rel_shift(self, x, zero_triu=False): s = (x.size(0), 1) + x.size()[2:]", "ForSeqClassifier(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model =", "bias_i = self.out_layers[i].bias if i == 0: weight_i = torch.cat([weight_i,", "qc.LayerNorm(m, **kw) def rel_shift(self, x, zero_triu=False): s = (x.size(0), 1)", "# You may obtain a copy of the License at", "= Positionwise(**kw) def forward(self, x, r, dec_m=None, **kw): ys =", "in enumerate(cfg.tie_projs): if tie_proj and cfg.div_val == 1 and cfg.d_model", "= torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not", "ws[i], bs[i], self.out_projs[i] tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i", "= y.new_ones((n, klen), dtype=torch.uint8) if cfg.same_length: d = klen -", "None else mems[i] ys = lay(y, pos, **kw, dec_m=dec_m, head_m=head_m[i],", "cfg.scale = 1 / (h**0.5) self.qkv = qc.Linear(m, 3 *", "indices_i) - l_idx head_logprob_i = head_logprob.index_select(0, indices_i) hidden_i = x.index_select(0,", "\"d_model\", \"drop\", \"n_heads\"}, {\"drop_attn\": 0.0, \"eps\": 1e-5, \"pre_norm\": False}, )", "k.size(0), r.size(0) q = q if mems is None else", "== labels.size(0) else: x = x.view(-1, x.size(-1)) if self.n_clusters ==", "nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs)): if", "head_bias, head_proj) y = x.new_empty((head_logit.size(0), self.s_vocab)) head_logprob = F.log_softmax(head_logit, dim=1)", "n * h, bias=False) self.r_net = qc.Linear(m, n * h,", ":, None] else: dec_m = torch.triu(ones, diagonal=1 + mlen)[:, :,", "else: assert x_emb is None x = x.transpose(0, 1).contiguous() s", "is None else q[-qlen:] b, n, h = x.size(1), cfg.n_heads,", "- l_idx)) self.keep_order = keep_order def _compute_logit(self, x, weight, bias,", "indices_i = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue target_i =", "b, n, h) r = r.view(rlen, n, h) AC =", "x.index_select(0, indices_i) else: hidden_i = x if i == 0:", "cfg = self.get_cfg(kw) self.model = Model(**kw) self.proj = qc.Linear(cfg.d_embed, cfg.n_labels,", "self.n_clusters if self.n_clusters > 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias", "y) r = self.r_net(r) q, k, v = torch.chunk(a, 3,", "**kw ) def tie_weights(self): cfg = self.cfg if cfg.tie_word_embeds: for", "is not None: logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: y[:,", "= x.size(0), k.size(0), r.size(0) q = q if mems is", "cfg.n_heads, cfg.d_head q = q.view(qlen, b, n, h) k =", "..core import output as qo from ..core.embed import Adaptive, Positional", "forward(self, x, labels=None, keep_order=False): if labels is not None: x", "ws, bs = [], [] for i in range(len(self.cutoffs)): if", "y.size(1) - y.size(0))[:, :, None, None] return y def forward(self,", "\"License\"); # you may not use this file except in", "ws[0], bs[0], self.out_projs[0] head_logit = self._compute_logit(x, head_weight, head_bias, head_proj) y", "div_val=1, keep_order=False): super().__init__() self.s_vocab = s_vocab self.d_embed = d_embed self.d_proj", "cfg.torchscript: self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[i].clone()) else: self.proj.out_projs[i] = self.model.tok_emb.projs[i] def init_mems(self,", "= nn.Parameter(torch.FloatTensor(n, h)) else: self.q_bias = q_bias self.r_bias = r_bias", "F.log_softmax(head_logit, dim=1) cutoff_values = [0] + self.cutoffs for i in", 
"nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) self.out_layers = qc.Stack() self.out_projs =", "logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return y def log_prob(self, x): if", "self.drop = qc.Dropout(cfg.drop, **kw) def init_mems(self, b): cfg = self.cfg", "d = klen - cfg.mem_len shift = n - d", "is not None: # logit = logit + bias return", "= logprob_i if labels is not None: if (hasattr(self, \"keep_order\")", "if yo.attn: attns += (ys[1],) y = self.drop(y) mems =", "enumerate(cfg.tie_projs): if tie_proj and cfg.div_val == 1 and cfg.d_model !=", "is None else () loss = xs.view(b, tgt - 1)", "= x.size(1), cfg.n_heads, cfg.d_head q = q.view(qlen, b, n, h)", "- 1, -1, -1.0, device=y.device, dtype=y.dtype) if cfg.clamp_len > 0:", "else x_emb n, b = s if mems is None:", "= x_emb.size(0), x_emb.size(1) else: b, tgt = x.size(0), x.size(1) ys", "+ 1] weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else: weight_i", "i = self.get_minus_inf() if mask.dim() == 2: a = a.float().masked_fill(mask[None,", "torch.tril(ones, y.size(1) - y.size(0))[:, :, None, None] return y def", "self.cfg yo = self.get_y_opts(**kw) if x is None: x_emb =", "super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(**kw) self.proj = qc.Linear(cfg.d_embed,", "= F.log_softmax(head_logit, dim=1) if labels is None: y = x.new_empty((head_logit.size(0),", "mems = self.init_mems(b) mlen = mems[0].size(0) if mems is not", "cfg.clamp_len > 0: pos.clamp_(max=cfg.clamp_len) pos = self.drop(self.pos_emb(pos)) ones = y.new_ones((n,", "r.view(rlen, n, h) AC = torch.einsum(\"ibnd,jbnd->ijbn\", (q + self.q_bias, k))", "x.size(0)) + x.size()[2:] y = y.view(*s) y = y[1:].view_as(x) if", "y[offset : offset + logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return y", "F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob[:, -i] + tail_logprob_i y[:, beg_idx,", ":, None] y = self.drop(y) attns = () if yo.attn", "r_idx = cutoff_values[i], cutoff_values[i + 1] if labels is not", "klen - cfg.mem_len shift = n - d if d", "= None r_bias = None else: q_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head))" ]
[ "source dir dest_root = args.dest_root # specify your destination dir", "im_meta[\"bb\"] = bbs[i] facial5points = [[landmarks[i][j], landmarks[i][j + 5]] for", "# im_meta['ref'] = \"/\".join(image_name.split('/')[-5:]) img_warped.save(image_name) li_meta.append(im_meta) meta[ref] = li_meta with", "meta[ref] = li_meta with open(source_root + \"cropped-meta.pkl\", \"wb\") as f:", "print(\"{} is discarded due to exception!\".format(imfile)) continue ref = imfile.replace(source_root,", "= i im_meta[\"landmarks\"] = landmarks[i] im_meta[\"bb\"] = bbs[i] facial5points =", "np from PIL import Image from tqdm import tqdm from", "imfiles = [ f for f in glob.glob(f\"{source_root}F????/MID*/faces/msceleb*\") if Path(f).is_file()", "detect_faces(img) except Exception: print(\"{} is discarded due to exception!\".format(imfile)) continue", "ref = imfile.replace(source_root, \"\") print(\"Processing\\t{}\".format(imfile)) img = Image.open(imfile) try: #", "will be discarded print(\"{} is discarded due to non-detected landmarks!\".format(imfile))", ") args = parser.parse_args() source_root = args.source_root # specify your", "parser.parse_args() source_root = args.source_root # specify your source dir dest_root", "padding scale = crop_size / 112.0 reference = get_reference_facial_points(default_square=True) *", "meta = {} # for subfolder in tqdm(os.listdir(source_root)): for imfile", "bbs, landmarks = detect_faces(img) except Exception: print(\"{} is discarded due", "ref = imfile.replace(source_root, \"\") ndetections = len(landmarks) if ( ndetections", "detect_faces if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"face alignment\") parser.add_argument(", "imfile in tqdm(imfiles): ref = imfile.replace(source_root, \"\") print(\"Processing\\t{}\".format(imfile)) img =", "img will be discarded print(\"{} is discarded due to non-detected", "facial5points = [[landmarks[i][j], landmarks[i][j + 5]] for j in range(5)]", "imfile.replace(\"images\", \"cropped\").replace( \".jpg\", \"-{:02d}.jpg\".format(i) ) # im_meta['ref'] = \"/\".join(image_name.split('/')[-5:]) img_warped.save(image_name)", "import detect_faces if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"face alignment\")", "your destination dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument( \"-crop_size\", \"--crop_size\", help=\"specify", "= \"/\".join(image_name.split('/')[-5:]) img_warped.save(image_name) li_meta.append(im_meta) meta[ref] = li_meta with open(source_root +", "\"-crop_size\", \"--crop_size\", help=\"specify size of aligned faces, align and crop", "bbs[i] facial5points = [[landmarks[i][j], landmarks[i][j + 5]] for j in", "pathlib import Path import numpy as np from PIL import", "specify your source dir dest_root = args.dest_root # specify your", "np.array(img), facial5points, reference, crop_size=(crop_size, crop_size), ) img_warped = Image.fromarray(warped_face) image_name", "Image.open(imfile) try: # Handle exception bbs, landmarks = detect_faces(img) except", "specify your destination dir crop_size = ( args.crop_size ) #", "in tqdm(imfiles): ref = imfile.replace(source_root, \"\") print(\"Processing\\t{}\".format(imfile)) img = Image.open(imfile)", "except Exception: print(\"{} is discarded due to exception!\".format(imfile)) continue ref", "\".jpg\", \"-{:02d}.jpg\".format(i) ) # im_meta['ref'] = \"/\".join(image_name.split('/')[-5:]) img_warped.save(image_name) li_meta.append(im_meta) meta[ref]", "j in range(5)] warped_face = 
warp_and_crop_face( np.array(img), facial5points, reference, crop_size=(crop_size,", "{} im_meta[\"face\"] = i im_meta[\"landmarks\"] = landmarks[i] im_meta[\"bb\"] = bbs[i]", "[ f for f in glob.glob(f\"{source_root}F????/MID*/faces/msceleb*\") if Path(f).is_file() ] #", "Path(f).is_file() ] # images = {imfile.replace(source_root, ''): Image.open(imfile) for imfile", "img_warped.save(image_name) li_meta.append(im_meta) meta[ref] = li_meta with open(source_root + \"cropped-meta.pkl\", \"wb\")", "= {} im_meta[\"face\"] = i im_meta[\"landmarks\"] = landmarks[i] im_meta[\"bb\"] =", "args.source_root # specify your source dir dest_root = args.dest_root #", "Image.open(imfile) for imfile in imfiles} meta = {} # for", "== 0 ): # If the landmarks cannot be detected,", "os import pickle from pathlib import Path import numpy as", "args = parser.parse_args() source_root = args.source_root # specify your source", "imfiles} meta = {} # for subfolder in tqdm(os.listdir(source_root)): for", "for subfolder in tqdm(os.listdir(source_root)): for imfile in tqdm(imfiles): ref =", "= warp_and_crop_face( np.array(img), facial5points, reference, crop_size=(crop_size, crop_size), ) img_warped =", ") # im_meta['ref'] = \"/\".join(image_name.split('/')[-5:]) img_warped.save(image_name) li_meta.append(im_meta) meta[ref] = li_meta", "landmarks = detect_faces(img) except Exception: print(\"{} is discarded due to", "landmarks[i][j + 5]] for j in range(5)] warped_face = warp_and_crop_face(", "parser.add_argument( \"-dest_root\", \"--dest_root\", help=\"specify your destination dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, )", "exception!\".format(imfile)) continue ref = imfile.replace(source_root, \"\") ndetections = len(landmarks) if", "src.align.align_trans import get_reference_facial_points, warp_and_crop_face # sys.path.append(\"../../\") from src.align.detector import detect_faces", "im_meta[\"face\"] = i im_meta[\"landmarks\"] = landmarks[i] im_meta[\"bb\"] = bbs[i] facial5points", "with padding scale = crop_size / 112.0 reference = get_reference_facial_points(default_square=True)", "= imfile.replace(source_root, \"\") ndetections = len(landmarks) if ( ndetections ==", "f for f in glob.glob(f\"{source_root}F????/MID*/faces/msceleb*\") if Path(f).is_file() ] # images", "\"-{:02d}.jpg\".format(i) ) # im_meta['ref'] = \"/\".join(image_name.split('/')[-5:]) img_warped.save(image_name) li_meta.append(im_meta) meta[ref] =", "= detect_faces(img) except Exception: print(\"{} is discarded due to exception!\".format(imfile))", "im_meta = {} im_meta[\"face\"] = i im_meta[\"landmarks\"] = landmarks[i] im_meta[\"bb\"]", "your destination dir crop_size = ( args.crop_size ) # specify", "# specify your destination dir crop_size = ( args.crop_size )", "and crop with padding scale = crop_size / 112.0 reference", "is discarded due to exception!\".format(imfile)) continue ref = imfile.replace(source_root, \"\")", "align and crop with padding scale = crop_size / 112.0", "discarded due to exception!\".format(imfile)) continue ref = imfile.replace(source_root, \"\") ndetections", "ndetections == 0 ): # If the landmarks cannot be", "is discarded due to non-detected landmarks!\".format(imfile)) meta[ref] = [] continue", "\"__main__\": parser = argparse.ArgumentParser(description=\"face alignment\") parser.add_argument( \"-source_root\", \"--source_root\", help=\"specify your", "+ 5]] for j in range(5)] warped_face = warp_and_crop_face( np.array(img),", "= args.dest_root # specify your destination dir crop_size = (", 
"{imfile.replace(source_root, ''): Image.open(imfile) for imfile in imfiles} meta = {}", "import numpy as np from PIL import Image from tqdm", "= ( args.crop_size ) # specify size of aligned faces,", "\"--dest_root\", help=\"specify your destination dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument( \"-crop_size\",", ". -name '*.DS_Store' -type f -delete\") os.chdir(cwd) imfiles = [", "''): Image.open(imfile) for imfile in imfiles} meta = {} #", "__name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"face alignment\") parser.add_argument( \"-source_root\", \"--source_root\",", "type=str, ) parser.add_argument( \"-dest_root\", \"--dest_root\", help=\"specify your destination dir\", default=\"../../data/fiw-videos/new-processed/\",", "\"-dest_root\", \"--dest_root\", help=\"specify your destination dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument(", "crop with padding\", default=112, type=int, ) args = parser.parse_args() source_root", "= {imfile.replace(source_root, ''): Image.open(imfile) for imfile in imfiles} meta =", "to exception!\".format(imfile)) continue ref = imfile.replace(source_root, \"\") ndetections = len(landmarks)", "== \"__main__\": parser = argparse.ArgumentParser(description=\"face alignment\") parser.add_argument( \"-source_root\", \"--source_root\", help=\"specify", "parser.add_argument( \"-crop_size\", \"--crop_size\", help=\"specify size of aligned faces, align and", "faces, align and crop with padding scale = crop_size /", "imfile.replace(source_root, \"\") print(\"Processing\\t{}\".format(imfile)) img = Image.open(imfile) try: # Handle exception", "print(\"{} is discarded due to non-detected landmarks!\".format(imfile)) meta[ref] = []", "for imfile in tqdm(imfiles): ref = imfile.replace(source_root, \"\") print(\"Processing\\t{}\".format(imfile)) img", "meta[ref] = [] continue li_meta = [] for i in", "src.align.detector import detect_faces if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"face", "aligned faces, align and crop with padding scale = crop_size", "cwd = os.getcwd() # delete '.DS_Store' existed in the source_root", "\"\") print(\"Processing\\t{}\".format(imfile)) img = Image.open(imfile) try: # Handle exception bbs,", "the img will be discarded print(\"{} is discarded due to", "] # images = {imfile.replace(source_root, ''): Image.open(imfile) for imfile in", "range(ndetections): im_meta = {} im_meta[\"face\"] = i im_meta[\"landmarks\"] = landmarks[i]", "in range(5)] warped_face = warp_and_crop_face( np.array(img), facial5points, reference, crop_size=(crop_size, crop_size),", "Image.fromarray(warped_face) image_name = imfile.replace(\"images\", \"cropped\").replace( \".jpg\", \"-{:02d}.jpg\".format(i) ) # im_meta['ref']", "args.crop_size ) # specify size of aligned faces, align and", "): # If the landmarks cannot be detected, the img", "help=\"specify size of aligned faces, align and crop with padding\",", "warp_and_crop_face # sys.path.append(\"../../\") from src.align.detector import detect_faces if __name__ ==", "= [ f for f in glob.glob(f\"{source_root}F????/MID*/faces/msceleb*\") if Path(f).is_file() ]", "due to exception!\".format(imfile)) continue ref = imfile.replace(source_root, \"\") ndetections =", "= Image.open(imfile) try: # Handle exception bbs, landmarks = detect_faces(img)", "import get_reference_facial_points, warp_and_crop_face # sys.path.append(\"../../\") from src.align.detector import detect_faces if", "import tqdm 
from src.align.align_trans import get_reference_facial_points, warp_and_crop_face # sys.path.append(\"../../\") from", "= args.source_root # specify your source dir dest_root = args.dest_root", "# for subfolder in tqdm(os.listdir(source_root)): for imfile in tqdm(imfiles): ref", "tqdm from src.align.align_trans import get_reference_facial_points, warp_and_crop_face # sys.path.append(\"../../\") from src.align.detector", "due to non-detected landmarks!\".format(imfile)) meta[ref] = [] continue li_meta =", "dir crop_size = ( args.crop_size ) # specify size of", "crop_size), ) img_warped = Image.fromarray(warped_face) image_name = imfile.replace(\"images\", \"cropped\").replace( \".jpg\",", "dir dest_root = args.dest_root # specify your destination dir crop_size", "your source dir dest_root = args.dest_root # specify your destination", "with padding\", default=112, type=int, ) args = parser.parse_args() source_root =", "for imfile in imfiles} meta = {} # for subfolder", "size of aligned faces, align and crop with padding\", default=112,", "import pickle from pathlib import Path import numpy as np", "tqdm import tqdm from src.align.align_trans import get_reference_facial_points, warp_and_crop_face # sys.path.append(\"../../\")", "os.system(\"find . -name '*.DS_Store' -type f -delete\") os.chdir(cwd) imfiles =", "img = Image.open(imfile) try: # Handle exception bbs, landmarks =", "continue ref = imfile.replace(source_root, \"\") ndetections = len(landmarks) if (", "landmarks!\".format(imfile)) meta[ref] = [] continue li_meta = [] for i", "the landmarks cannot be detected, the img will be discarded", "crop_size=(crop_size, crop_size), ) img_warped = Image.fromarray(warped_face) image_name = imfile.replace(\"images\", \"cropped\").replace(", "Path import numpy as np from PIL import Image from", "scale cwd = os.getcwd() # delete '.DS_Store' existed in the", "os.chdir(source_root) os.system(\"find . 
-name '*.DS_Store' -type f -delete\") os.chdir(cwd) imfiles", "os.getcwd() # delete '.DS_Store' existed in the source_root os.chdir(source_root) os.system(\"find", "for f in glob.glob(f\"{source_root}F????/MID*/faces/msceleb*\") if Path(f).is_file() ] # images =", "# Handle exception bbs, landmarks = detect_faces(img) except Exception: print(\"{}", "for j in range(5)] warped_face = warp_and_crop_face( np.array(img), facial5points, reference,", "aligned faces, align and crop with padding\", default=112, type=int, )", "scale = crop_size / 112.0 reference = get_reference_facial_points(default_square=True) * scale", "[] for i in range(ndetections): im_meta = {} im_meta[\"face\"] =", "get_reference_facial_points(default_square=True) * scale cwd = os.getcwd() # delete '.DS_Store' existed", "i im_meta[\"landmarks\"] = landmarks[i] im_meta[\"bb\"] = bbs[i] facial5points = [[landmarks[i][j],", "range(5)] warped_face = warp_and_crop_face( np.array(img), facial5points, reference, crop_size=(crop_size, crop_size), )", "source_root = args.source_root # specify your source dir dest_root =", "and crop with padding\", default=112, type=int, ) args = parser.parse_args()", "li_meta = [] for i in range(ndetections): im_meta = {}", "be discarded print(\"{} is discarded due to non-detected landmarks!\".format(imfile)) meta[ref]", "from pathlib import Path import numpy as np from PIL", "# delete '.DS_Store' existed in the source_root os.chdir(source_root) os.system(\"find .", "tqdm(os.listdir(source_root)): for imfile in tqdm(imfiles): ref = imfile.replace(source_root, \"\") print(\"Processing\\t{}\".format(imfile))", "# sys.path.append(\"../../\") from src.align.detector import detect_faces if __name__ == \"__main__\":", "# If the landmarks cannot be detected, the img will", "im_meta[\"landmarks\"] = landmarks[i] im_meta[\"bb\"] = bbs[i] facial5points = [[landmarks[i][j], landmarks[i][j", "= parser.parse_args() source_root = args.source_root # specify your source dir", "dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument( \"-dest_root\", \"--dest_root\", help=\"specify your destination", "detected, the img will be discarded print(\"{} is discarded due", "0 ): # If the landmarks cannot be detected, the", "{} # for subfolder in tqdm(os.listdir(source_root)): for imfile in tqdm(imfiles):", "default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument( \"-dest_root\", \"--dest_root\", help=\"specify your destination dir\",", "( args.crop_size ) # specify size of aligned faces, align", "os.chdir(cwd) imfiles = [ f for f in glob.glob(f\"{source_root}F????/MID*/faces/msceleb*\") if", "pickle from pathlib import Path import numpy as np from", "= argparse.ArgumentParser(description=\"face alignment\") parser.add_argument( \"-source_root\", \"--source_root\", help=\"specify your source dir\",", ") parser.add_argument( \"-crop_size\", \"--crop_size\", help=\"specify size of aligned faces, align", "default=112, type=int, ) args = parser.parse_args() source_root = args.source_root #", "in glob.glob(f\"{source_root}F????/MID*/faces/msceleb*\") if Path(f).is_file() ] # images = {imfile.replace(source_root, ''):", "if Path(f).is_file() ] # images = {imfile.replace(source_root, ''): Image.open(imfile) for", "print(\"Processing\\t{}\".format(imfile)) img = Image.open(imfile) try: # Handle exception bbs, landmarks", "\"\") ndetections = len(landmarks) if ( ndetections == 0 ):", "for i in range(ndetections): im_meta = {} im_meta[\"face\"] = i", "in range(ndetections): 
im_meta = {} im_meta[\"face\"] = i im_meta[\"landmarks\"] =", "landmarks[i] im_meta[\"bb\"] = bbs[i] facial5points = [[landmarks[i][j], landmarks[i][j + 5]]", "import glob import os import pickle from pathlib import Path", "crop with padding scale = crop_size / 112.0 reference =", "argparse.ArgumentParser(description=\"face alignment\") parser.add_argument( \"-source_root\", \"--source_root\", help=\"specify your source dir\", default=\"../../data/fiw-videos/new-processed/\",", "\"cropped\").replace( \".jpg\", \"-{:02d}.jpg\".format(i) ) # im_meta['ref'] = \"/\".join(image_name.split('/')[-5:]) img_warped.save(image_name) li_meta.append(im_meta)", "crop_size / 112.0 reference = get_reference_facial_points(default_square=True) * scale cwd =", "# specify your source dir dest_root = args.dest_root # specify", "continue li_meta = [] for i in range(ndetections): im_meta =", "of aligned faces, align and crop with padding\", default=112, type=int,", "img_warped = Image.fromarray(warped_face) image_name = imfile.replace(\"images\", \"cropped\").replace( \".jpg\", \"-{:02d}.jpg\".format(i) )", "exception bbs, landmarks = detect_faces(img) except Exception: print(\"{} is discarded", "Handle exception bbs, landmarks = detect_faces(img) except Exception: print(\"{} is", "facial5points, reference, crop_size=(crop_size, crop_size), ) img_warped = Image.fromarray(warped_face) image_name =", "-delete\") os.chdir(cwd) imfiles = [ f for f in glob.glob(f\"{source_root}F????/MID*/faces/msceleb*\")", "crop_size = ( args.crop_size ) # specify size of aligned", ") img_warped = Image.fromarray(warped_face) image_name = imfile.replace(\"images\", \"cropped\").replace( \".jpg\", \"-{:02d}.jpg\".format(i)", "= Image.fromarray(warped_face) image_name = imfile.replace(\"images\", \"cropped\").replace( \".jpg\", \"-{:02d}.jpg\".format(i) ) #", "sys.path.append(\"../../\") from src.align.detector import detect_faces if __name__ == \"__main__\": parser", "glob.glob(f\"{source_root}F????/MID*/faces/msceleb*\") if Path(f).is_file() ] # images = {imfile.replace(source_root, ''): Image.open(imfile)", "as np from PIL import Image from tqdm import tqdm", "alignment\") parser.add_argument( \"-source_root\", \"--source_root\", help=\"specify your source dir\", default=\"../../data/fiw-videos/new-processed/\", type=str,", "destination dir crop_size = ( args.crop_size ) # specify size", "source_root os.chdir(source_root) os.system(\"find . 
-name '*.DS_Store' -type f -delete\") os.chdir(cwd)", "source dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument( \"-dest_root\", \"--dest_root\", help=\"specify your", "destination dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument( \"-crop_size\", \"--crop_size\", help=\"specify size", "f -delete\") os.chdir(cwd) imfiles = [ f for f in", "discarded print(\"{} is discarded due to non-detected landmarks!\".format(imfile)) meta[ref] =", "= landmarks[i] im_meta[\"bb\"] = bbs[i] facial5points = [[landmarks[i][j], landmarks[i][j +", "import os import pickle from pathlib import Path import numpy", "help=\"specify your source dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument( \"-dest_root\", \"--dest_root\",", "faces, align and crop with padding\", default=112, type=int, ) args", "\"-source_root\", \"--source_root\", help=\"specify your source dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument(", "i in range(ndetections): im_meta = {} im_meta[\"face\"] = i im_meta[\"landmarks\"]", "reference = get_reference_facial_points(default_square=True) * scale cwd = os.getcwd() # delete", "in the source_root os.chdir(source_root) os.system(\"find . -name '*.DS_Store' -type f", "parser.add_argument( \"-source_root\", \"--source_root\", help=\"specify your source dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, )", "* scale cwd = os.getcwd() # delete '.DS_Store' existed in", "# specify size of aligned faces, align and crop with", "'*.DS_Store' -type f -delete\") os.chdir(cwd) imfiles = [ f for", "= crop_size / 112.0 reference = get_reference_facial_points(default_square=True) * scale cwd", "your source dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument( \"-dest_root\", \"--dest_root\", help=\"specify", "# images = {imfile.replace(source_root, ''): Image.open(imfile) for imfile in imfiles}", "ndetections = len(landmarks) if ( ndetections == 0 ): #", "glob import os import pickle from pathlib import Path import", "= [] continue li_meta = [] for i in range(ndetections):", "= [] for i in range(ndetections): im_meta = {} im_meta[\"face\"]", "cannot be detected, the img will be discarded print(\"{} is", "\"/\".join(image_name.split('/')[-5:]) img_warped.save(image_name) li_meta.append(im_meta) meta[ref] = li_meta with open(source_root + \"cropped-meta.pkl\",", "images = {imfile.replace(source_root, ''): Image.open(imfile) for imfile in imfiles} meta", "args.dest_root # specify your destination dir crop_size = ( args.crop_size", "= os.getcwd() # delete '.DS_Store' existed in the source_root os.chdir(source_root)", "padding\", default=112, type=int, ) args = parser.parse_args() source_root = args.source_root", "of aligned faces, align and crop with padding scale =", "Image from tqdm import tqdm from src.align.align_trans import get_reference_facial_points, warp_and_crop_face", "get_reference_facial_points, warp_and_crop_face # sys.path.append(\"../../\") from src.align.detector import detect_faces if __name__", "= [[landmarks[i][j], landmarks[i][j + 5]] for j in range(5)] warped_face", "= get_reference_facial_points(default_square=True) * scale cwd = os.getcwd() # delete '.DS_Store'", "imfile in imfiles} meta = {} # for subfolder in", "type=int, ) args = parser.parse_args() source_root = args.source_root # specify", "112.0 reference = get_reference_facial_points(default_square=True) * scale cwd = os.getcwd() 
#", "help=\"specify your destination dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument( \"-crop_size\", \"--crop_size\",", "[[landmarks[i][j], landmarks[i][j + 5]] for j in range(5)] warped_face =", ") # specify size of aligned faces, align and crop", "parser = argparse.ArgumentParser(description=\"face alignment\") parser.add_argument( \"-source_root\", \"--source_root\", help=\"specify your source", "discarded due to non-detected landmarks!\".format(imfile)) meta[ref] = [] continue li_meta", "5]] for j in range(5)] warped_face = warp_and_crop_face( np.array(img), facial5points,", "= imfile.replace(\"images\", \"cropped\").replace( \".jpg\", \"-{:02d}.jpg\".format(i) ) # im_meta['ref'] = \"/\".join(image_name.split('/')[-5:])", "-type f -delete\") os.chdir(cwd) imfiles = [ f for f", "-name '*.DS_Store' -type f -delete\") os.chdir(cwd) imfiles = [ f", "( ndetections == 0 ): # If the landmarks cannot", "try: # Handle exception bbs, landmarks = detect_faces(img) except Exception:", "import argparse import glob import os import pickle from pathlib", ") parser.add_argument( \"-dest_root\", \"--dest_root\", help=\"specify your destination dir\", default=\"../../data/fiw-videos/new-processed/\", type=str,", "'.DS_Store' existed in the source_root os.chdir(source_root) os.system(\"find . -name '*.DS_Store'", "existed in the source_root os.chdir(source_root) os.system(\"find . -name '*.DS_Store' -type", "= bbs[i] facial5points = [[landmarks[i][j], landmarks[i][j + 5]] for j", "default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument( \"-crop_size\", \"--crop_size\", help=\"specify size of aligned", "= len(landmarks) if ( ndetections == 0 ): # If", "= li_meta with open(source_root + \"cropped-meta.pkl\", \"wb\") as f: pickle.dump(meta,", "from src.align.detector import detect_faces if __name__ == \"__main__\": parser =", "from tqdm import tqdm from src.align.align_trans import get_reference_facial_points, warp_and_crop_face #", "if ( ndetections == 0 ): # If the landmarks", "warp_and_crop_face( np.array(img), facial5points, reference, crop_size=(crop_size, crop_size), ) img_warped = Image.fromarray(warped_face)", "li_meta with open(source_root + \"cropped-meta.pkl\", \"wb\") as f: pickle.dump(meta, f)", "[] continue li_meta = [] for i in range(ndetections): im_meta", "align and crop with padding\", default=112, type=int, ) args =", "imfile.replace(source_root, \"\") ndetections = len(landmarks) if ( ndetections == 0", "len(landmarks) if ( ndetections == 0 ): # If the", "if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"face alignment\") parser.add_argument( \"-source_root\",", "delete '.DS_Store' existed in the source_root os.chdir(source_root) os.system(\"find . 
-name", "be detected, the img will be discarded print(\"{} is discarded", "= {} # for subfolder in tqdm(os.listdir(source_root)): for imfile in", "PIL import Image from tqdm import tqdm from src.align.align_trans import", "warped_face = warp_and_crop_face( np.array(img), facial5points, reference, crop_size=(crop_size, crop_size), ) img_warped", "landmarks cannot be detected, the img will be discarded print(\"{}", "reference, crop_size=(crop_size, crop_size), ) img_warped = Image.fromarray(warped_face) image_name = imfile.replace(\"images\",", "im_meta['ref'] = \"/\".join(image_name.split('/')[-5:]) img_warped.save(image_name) li_meta.append(im_meta) meta[ref] = li_meta with open(source_root", "numpy as np from PIL import Image from tqdm import", "in tqdm(os.listdir(source_root)): for imfile in tqdm(imfiles): ref = imfile.replace(source_root, \"\")", "If the landmarks cannot be detected, the img will be", "argparse import glob import os import pickle from pathlib import", "non-detected landmarks!\".format(imfile)) meta[ref] = [] continue li_meta = [] for", "type=str, ) parser.add_argument( \"-crop_size\", \"--crop_size\", help=\"specify size of aligned faces,", "dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument( \"-crop_size\", \"--crop_size\", help=\"specify size of", "from src.align.align_trans import get_reference_facial_points, warp_and_crop_face # sys.path.append(\"../../\") from src.align.detector import", "from PIL import Image from tqdm import tqdm from src.align.align_trans", "\"--crop_size\", help=\"specify size of aligned faces, align and crop with", "f in glob.glob(f\"{source_root}F????/MID*/faces/msceleb*\") if Path(f).is_file() ] # images = {imfile.replace(source_root,", "/ 112.0 reference = get_reference_facial_points(default_square=True) * scale cwd = os.getcwd()", "size of aligned faces, align and crop with padding scale", "to non-detected landmarks!\".format(imfile)) meta[ref] = [] continue li_meta = []", "dest_root = args.dest_root # specify your destination dir crop_size =", "import Image from tqdm import tqdm from src.align.align_trans import get_reference_facial_points,", "\"--source_root\", help=\"specify your source dir\", default=\"../../data/fiw-videos/new-processed/\", type=str, ) parser.add_argument( \"-dest_root\",", "specify size of aligned faces, align and crop with padding", "= imfile.replace(source_root, \"\") print(\"Processing\\t{}\".format(imfile)) img = Image.open(imfile) try: # Handle", "image_name = imfile.replace(\"images\", \"cropped\").replace( \".jpg\", \"-{:02d}.jpg\".format(i) ) # im_meta['ref'] =", "tqdm(imfiles): ref = imfile.replace(source_root, \"\") print(\"Processing\\t{}\".format(imfile)) img = Image.open(imfile) try:", "li_meta.append(im_meta) meta[ref] = li_meta with open(source_root + \"cropped-meta.pkl\", \"wb\") as", "Exception: print(\"{} is discarded due to exception!\".format(imfile)) continue ref =", "import Path import numpy as np from PIL import Image", "in imfiles} meta = {} # for subfolder in tqdm(os.listdir(source_root)):", "the source_root os.chdir(source_root) os.system(\"find . -name '*.DS_Store' -type f -delete\")", "subfolder in tqdm(os.listdir(source_root)): for imfile in tqdm(imfiles): ref = imfile.replace(source_root," ]
[ "srtvoiceext import extract if __name__ == '__main__': ext = extract('video.mkv',", "import extract if __name__ == '__main__': ext = extract('video.mkv', 'subtitles.srt',", "extract if __name__ == '__main__': ext = extract('video.mkv', 'subtitles.srt', 'outdir')", "from srtvoiceext import extract if __name__ == '__main__': ext =" ]
[ "ReadOnlyDict') def __delitem__(self, key): raise TypeError('Cannot modify ReadOnlyDict') def __iter__(self):", "return iter(self.store) def __len__(self): return len(self.store) def __str__(self): return 'ReadOnlyDict(%s)'", "return len(self.store) def __str__(self): return 'ReadOnlyDict(%s)' % self.store def __repr__(self):", "key): raise TypeError('Cannot modify ReadOnlyDict') def __iter__(self): return iter(self.store) def", "key, value): raise TypeError('Cannot modify ReadOnlyDict') def __delitem__(self, key): raise", "len(self.store) def __str__(self): return 'ReadOnlyDict(%s)' % self.store def __repr__(self): return", "import collections class ReadOnlyDict(collections.MutableMapping): def __init__(self, store): self.store = store", "def __setitem__(self, key, value): raise TypeError('Cannot modify ReadOnlyDict') def __delitem__(self,", "def __iter__(self): return iter(self.store) def __len__(self): return len(self.store) def __str__(self):", "raise TypeError('Cannot modify ReadOnlyDict') def __iter__(self): return iter(self.store) def __len__(self):", "def __len__(self): return len(self.store) def __str__(self): return 'ReadOnlyDict(%s)' % self.store", "= store def __getitem__(self, key): return self.store[key] def __setitem__(self, key,", "def __str__(self): return 'ReadOnlyDict(%s)' % self.store def __repr__(self): return 'ReadOnlyDict(%r)'", "return self.store[key] def __setitem__(self, key, value): raise TypeError('Cannot modify ReadOnlyDict')", "def __getitem__(self, key): return self.store[key] def __setitem__(self, key, value): raise", "raise TypeError('Cannot modify ReadOnlyDict') def __delitem__(self, key): raise TypeError('Cannot modify", "store): self.store = store def __getitem__(self, key): return self.store[key] def", "return 'ReadOnlyDict(%s)' % self.store def __repr__(self): return 'ReadOnlyDict(%r)' % self.store", "value): raise TypeError('Cannot modify ReadOnlyDict') def __delitem__(self, key): raise TypeError('Cannot", "TypeError('Cannot modify ReadOnlyDict') def __iter__(self): return iter(self.store) def __len__(self): return", "__str__(self): return 'ReadOnlyDict(%s)' % self.store def __repr__(self): return 'ReadOnlyDict(%r)' %", "TypeError('Cannot modify ReadOnlyDict') def __delitem__(self, key): raise TypeError('Cannot modify ReadOnlyDict')", "__iter__(self): return iter(self.store) def __len__(self): return len(self.store) def __str__(self): return", "def __delitem__(self, key): raise TypeError('Cannot modify ReadOnlyDict') def __iter__(self): return", "collections class ReadOnlyDict(collections.MutableMapping): def __init__(self, store): self.store = store def", "store def __getitem__(self, key): return self.store[key] def __setitem__(self, key, value):", "self.store = store def __getitem__(self, key): return self.store[key] def __setitem__(self,", "__delitem__(self, key): raise TypeError('Cannot modify ReadOnlyDict') def __iter__(self): return iter(self.store)", "key): return self.store[key] def __setitem__(self, key, value): raise TypeError('Cannot modify", "self.store[key] def __setitem__(self, key, value): raise TypeError('Cannot modify ReadOnlyDict') def", "modify ReadOnlyDict') def __delitem__(self, key): raise TypeError('Cannot modify ReadOnlyDict') def", "__init__(self, store): self.store = store def __getitem__(self, key): return self.store[key]", "ReadOnlyDict') def __iter__(self): return iter(self.store) def __len__(self): return len(self.store) def", "class ReadOnlyDict(collections.MutableMapping): def __init__(self, store): self.store = store 
def __getitem__(self,", "__len__(self): return len(self.store) def __str__(self): return 'ReadOnlyDict(%s)' % self.store def", "ReadOnlyDict(collections.MutableMapping): def __init__(self, store): self.store = store def __getitem__(self, key):", "iter(self.store) def __len__(self): return len(self.store) def __str__(self): return 'ReadOnlyDict(%s)' %", "__getitem__(self, key): return self.store[key] def __setitem__(self, key, value): raise TypeError('Cannot", "modify ReadOnlyDict') def __iter__(self): return iter(self.store) def __len__(self): return len(self.store)", "__setitem__(self, key, value): raise TypeError('Cannot modify ReadOnlyDict') def __delitem__(self, key):", "def __init__(self, store): self.store = store def __getitem__(self, key): return" ]
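Because MutableMapping derives get(), items(), __contains__ and friends from the methods above, the wrapper behaves like a full dict for readers while rejecting writes. Two caveats: collections.MutableMapping was removed in Python 3.10, so modern code must import the base class from collections.abc; and subclassing collections.abc.Mapping instead would drop the need for the two raising methods entirely. A quick usage sketch (the config values are mine):

cfg = ReadOnlyDict({"host": "localhost", "port": 8080})

print(cfg["port"])            # 8080 -- reads go through __getitem__
print(len(cfg), sorted(cfg))  # 2 ['host', 'port'] -- mixin methods work too
print(cfg.get("missing", 0))  # 0 -- get() comes free from the ABC

try:
    cfg["port"] = 9090        # writes are rejected
except TypeError as e:
    print(e)                  # Cannot modify ReadOnlyDict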
[ "self.map_box[:] = map_with_attr def handle_input(self, _input): if _input == \"ctrl", "for b in self.parent.bodies][self.focus_position] self.parent.update_body(new_body) except: pass class ButtonLabel(urwid.SelectableIcon): def", "b in _frames} idx = -1 _title = _frames[idx] self.active_body", "{player.movement_speed}\\n\") _right.append(f\"Monsterized {player.MP:<2d}\\n\") self.box[:] = [urwid.Text(_top), urwid.Columns([urwid.Text(_left), urwid.Text(_right)], dividechars =", "for b, val in _bonus.items(): if b == \"dmg_reduction\": _top", "label): ''' set_text is invoked by Button.set_label ''' self.__super.set_text(label) self._cursor_position", "_input in (\"-\", \"=\"): self.select_item(_input) self.update_footer() elif _input == self.mind.key_map[\"status-menu\"]", "MyButton(label, borders=borders, disabled=disabled) btn._label.align = align if cmd: if user_args:", "+= [\"╚\" +\"═\"*width+\"╝\"] if self.player.inventory.selection: i = self.player.inventory.selection X_OFFSET =", "= {b : globals()[f\"{b}Frame\"](self, mind) for b in _frames} idx", "selectable(self): return False def update_body(self, _title): self.active_body = self.bodies[_title] self.contents[\"body\"]", "urwid.Text(\"\") i = self.player.inventory.selection _text = [] _text += [i.eq_description,", "x in range(height): _marker_box += [\"║\"] for y in range(width):", "[\"Menu commands\\n\\n\", f\"tab:open/close\\n\",f\"0/9-=:select item\\n\", f\"ctrl+p:respawn\\n\", f\"ctrl+a:inventory\\n\", f\"ctrl+s:status\\n\", f\"ctrl+d:help\\n\", f\"ctrl+e:equipment\\n\"] columns", "including an empty string, can be set and displayed -", "_marker = [\"[\", (obj.color, f\"{obj.marker[0]}\"), \"]\"] elif obj.is_equipment and not", "self._disabled # @disabled.setter # def disabled(self, value): # if self._disabled", "def start_game_frame(self): self.bodies[\"Game\"] = GameFrame(self, self.mind) self.update_body(\"Game\", no_title=True) class IntroFrame(UiFrame):", "= self.player.inventory.get(_input) def update_header(self): widgets = [] for p in", "({c.mod:<+2d})\\n\"] elif self.parent.parent.menu_width > 36: _name = c.name[0].upper() + c.name[1:6]", "\"Bard\": \"The noisy bard\\n\\nCharisma +1, Dexterity +1, Intelligence +1, Hit", "player = self.player x, y, z = player.position _top =", "elif not i.is_equipped: _text += [(\"green\", f\"{'Enter:equip':<14s}\")] elif i.is_equipped: _text", "+1, Hit points +4\\nCharge and parry\", \"Dwarf\": \"The short dwarf\\n\\nStrength", "= urwid.SimpleFocusListWalker(line) urwid.connect_signal(walker, \"modified\", self.update_description) self.listbox = SelectableListBox(walker) header =", "if self.menu_view: self.menu.on_update() def handle_input(self, _input): if _input == \"tab\":", "disabled=disabled) btn._label.align = align if cmd: if user_args: urwid.connect_signal(btn, \"click\",", "= \"\\n 0 \" elif i == 10: _num =", "!= (80, 24): inv_btns.append(urwid.Text(\"\\nSET TERMINAL\\nTO 80X24\", align=\"center\")) self.contents[\"footer\"] = (SelectableColumns(inv_btns,", "self.mind.screen_size[1]//8) @property def menu_width(self): if self.menu_view: return min(MAX_MENU_WIDTH, (3*self.mind.screen_size[0])//7) return", "self.choices: btn = attr_button(c, self.select_class) line.append(btn) walker = urwid.SimpleFocusListWalker(line) urwid.connect_signal(walker,", "\"standout\"), (\"cyan\",\"light cyan\",\"black\"), (\"cyan_line\",\"light cyan\",\"white\", \"standout\"), (\"name\",\"white\",\"black\"), ] class UiFrame(urwid.Frame):", "on_update(self): self.update_header() self.update_body() 
self.update_footer() class StatusFrame(UiFrame): def __init__(self, parent, mind):", "try: self.focus_position += 1 if self.focus_position >= self.widget_size: self.focus_position -=", "_input = 11 self.player.inventory.selection = self.player.inventory.get(_input) def update_header(self): widgets =", "= 0 inv_btns = [] for i, obj in self.player.inventory.content.items():", "\"negative\"][-int(c.temp_bonus < 0) + int(c.temp_bonus > 0)] if self.parent.parent.menu_width >", "self.menu.update_body(\"Help\") elif _input == self.mind.key_map[\"equipment-menu\"] and self.menu_view: self.menu.update_body(\"Equipment\") elif _input", "pass def dispatch_event(self, event_type, *args): self.mind.get_GUI_event(event_type, *args) def register_event(self, event_type,", "@property def player(self): if self.mind.avatar.uuid in self.mind.master.players: return self.mind.master.players[self.mind.avatar.uuid] else:", "class UiFrame(urwid.Frame): def __init__(self, parent, mind, *args, **kargs): self.parent =", "= self.player x, y, z = player.position _top = f\"{player.name:<12s}", "\"The mighty warrior\\n\\nStrength +1, Hit points +4\\nCharge and parry\", \"Dwarf\":", "@property def header_height(self): return MIN_HEADER_HEIGHT#max(MIN_HEADER_HEIGHT, self.mind.screen_size[1]//8) @property def menu_width(self): if", "dwarf\\n\\nStrength +1, Constitution +1, Hit points +6\\nDemolish and parry\", \"Wizard\":", "self.contents[\"header\"] = (self.default_header, None) else: i = self.player.inventory.selection self.contents[\"header\"] =", "visible_map = [line[w:w+self.parent.body_width] for line in _map] h = max(0,", "globals()[f\"{b}Frame\"](self, mind) for b in _frames} idx = -1 _title", "self.parent.parent.menu_width > 40: _name = c.name[0].upper() + c.name[1:] _left +=", "self.update_header() self.update_body() self.update_footer() class StatusFrame(UiFrame): def __init__(self, parent, mind): box", "self.map = MapFrame(self, mind) self.menu = MenuFrame(self, mind) super().__init__(parent, mind,", "self.footer_content_size = _size def on_update(self): self.update_header() if self.footer_content_size != len(self.player.inventory.all):", "None) self.footer_content_size = _size def on_update(self): self.update_header() if self.footer_content_size !=", "_title): self.active_body = self.bodies[_title] self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=_title), None) class", "for k, act in self.mind.key_map.items() if act.startswith(\"class_ability\")] for i, act", "min_dmg = max(1, base + min_dmg) max_dmg = max(1, base", "40: _name = c.name[0].upper() + c.name[1:] _left += [f\"{_name:<12} \",", "for line in _map] h = max(0, x - self.parent.body_height//2)", "_input == \"tab\": self.menu_view = not self.menu_view elif _input ==", "class MyButton(urwid.Button): ''' - override __init__ to use our ButtonLabel", "= max(0, y - self.parent.body_width//3) visible_map = [line[w:w+self.parent.body_width] for line", "(\"common\",\"white\",\"black\"), (\"common_line\",\"black\",\"white\",\"standout\"), (\"uncommon\",\"dark cyan\",\"black\"), (\"uncommon_line\",\"dark cyan\",\"white\",\"standout\"), (\"rare\",\"yellow\",\"black\"), (\"rare_line\",\"yellow\",\"white\",\"standout\"), (\"unique\",\"light magenta\",\"black\"),", "for i, obj in self.player.inventory.content.items(): if obj: _size += 1", "strings and variable width - any string, including an empty", "self.parent = parent self.mind = mind urwid.AttrMap(self,\"frame\") super().__init__(*args, **kargs) @property", "class HelpFrame(UiFrame): def 
__init__(self, parent, mind): self.mind = mind map_commands", "and self.player.inventory.selection: self.player.actions[\"drop\"].use(self.player, obj=self.player.inventory.selection) self.update_footer() elif _input.isnumeric() or _input in", "parent, mind): # urwid.Padding(urwid.BigText(('top', \"Hack\\'n\\'SSH\"), urwid.HalfBlock5x4Font())), self.choices = (\"Warrior\", \"Dwarf\",", "== self.mind.key_map[\"equipment-menu\"] and self.menu_view: self.menu.update_body(\"Equipment\") elif _input == self.mind.key_map[\"inventory-menu\"] and", "super().__init__(*args, **kargs) @property def player(self): if self.mind.avatar.uuid in self.mind.master.players: return", "= [] for c in self.choices: btn = attr_button(c, self.select_class)", "_text += [i.eq_description, f\"\\nEncumbrance:{i.encumbrance}\\n\"] return urwid.Text(_text) def update_header(self): if not", "= player.full_eqp_bonus(eqp, b) if b not in _bonus: _bonus[b] =", "red\", \"white\", \"standout\"), (\"reversed\", \"standout\", \"\"), (\"common\",\"white\",\"black\"), (\"common_line\",\"black\",\"white\",\"standout\"), (\"uncommon\",\"dark cyan\",\"black\"),", "self.menu = MenuFrame(self, mind) super().__init__(parent, mind, urwid.Columns([(self.map_width, self.map), (self.menu_width, self.menu)],", "== value: # return # if self.disabled: # urwid.AttrMap(self, \"disabled\")", "update_description(self): index = min(self.listbox.focus_position, len(self.choices)-1) choice = self.choices[index] self.contents[\"body\"] =", "in self.mind.master.players: return self.mind.master.players[self.mind.avatar.uuid] else: return None @property def connection(self):", "(self.menu_width, self.menu)] self.contents[\"body\"] = (urwid.Columns(_columns, focus_column=1), None) @property def header_list(self):", "f\" ({c.mod:<+2d})\\n\"] elif self.parent.parent.menu_width > 36: _name = c.name[0].upper() +", "_right.append(f\"Reduction {player.dmg_reduction:<3d}\\n\") _right.append(f\"Encumb \") if player.inventory.encumbrance == EXTRA_ENCUMBRANCE_MULTI*player.encumbrance: _right.append((\"red\", f\"{player.inventory.encumbrance:>2d}\"))", "+\"═\"*width+\"╝\"] if self.player.inventory.selection: i = self.player.inventory.selection X_OFFSET = 2 Y_OFFSET", "\"center\", user_args = None, borders=True, disabled=False): btn = MyButton(label, borders=borders,", "try: self.focus_position += 1 except: pass def focus_previous(self): try: self.focus_position", "cmd=None, attr_map=None, focus_map = \"line\", align = \"center\", user_args =", "(\"positive\",\"light green\",\"black\"), (\"negative\",\"dark red\",\"black\"), (\"white\",\"white\",\"black\"), (\"disabled\",\"dark red\",\"black\"), (\"red\",\"dark red\",\"black\"), (\"green\",\"light", "[i.eq_description, f\"\\nEncumbrance:{i.encumbrance}\\n\"] return urwid.Text(_text) def update_header(self): if not self.player.inventory.selection: self.contents[\"header\"]", "+= [(\"red\", f\"{'Cannot equip':<14s}\")] elif not i.is_equipped: _text += [(\"green\",", "self.menu_view: self.menu.update_body(\"Equipment\") elif _input == self.mind.key_map[\"inventory-menu\"] and self.menu_view: self.menu.update_body(\"Inventory\") else:", "FrameColumns(urwid.Columns): def __init__(self, parent, widget_list, dividechars=0): self.widget_size = len(widget_list) super(FrameColumns,", "mod, distance from rpg_game.constants import * from urwid import raw_display", "= 4 for m, pos in zip(i.in_inventory_markers, i.in_inventory_marker_positions): x, y", "> 2: self.layer_view = -1 elif _input in self.mind.key_map: _action", 
"@property def menu_width(self): if self.menu_view: return min(MAX_MENU_WIDTH, (3*self.mind.screen_size[0])//7) return 0", "- any string, including an empty string, can be set", "map_with_attr def handle_input(self, _input): if _input == \"ctrl f\": self.debug_view", "= {\"Warrior\": \"The mighty warrior\\n\\nStrength +1, Hit points +4\\nCharge and", "widgets: self.header_widget.body[:] = widgets class MapFrame(UiFrame): def __init__(self, parent, mind):", "if act.startswith(\"class_ability\")] for i, act in enumerate(self.player.class_actions): k = class_action_keys[i]", "self.choices = (\"Warrior\", \"Dwarf\", \"Wizard\", \"Thief\", \"Bard\") self.descriptions = {\"Warrior\":", "- 2 @property def menu_view(self): return self._menu_view @menu_view.setter def menu_view(self,", "FOOTER_HEIGHT = 4 PALETTE = [ (\"line\", 'black', 'white', \"standout\"),", "else: i = self.player.inventory.selection _text = [] if not i.requisites(self.player):", "self.menu)], focus_column=1), header=_header, footer=None, focus_part=\"body\") self.menu_view = True self.update_footer() self.header_widget", "\"Dwarf\", \"Wizard\", \"Thief\", \"Bard\") self.descriptions = {\"Warrior\": \"The mighty warrior\\n\\nStrength", "in self.player.inventory.content.items(): if obj: _size += 1 if obj.is_equipment and", "MIN_HEADER_HEIGHT = 3 MAX_MENU_WIDTH = 48 FOOTER_HEIGHT = 4 PALETTE", "= int(_input)-1 elif _input == \"0\": s_input = 9 elif", "+ c.name[1:] _left += [f\"{_name:<12} \", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"]", "val _top = \"\" for b, val in _bonus.items(): if", "# urwid.AttrMap(self, \"disabled\") # else: # urwid.AttrMap(self, None, \"line\") def", "= [(self.map_width, self.map), (self.menu_width, self.menu)] self.contents[\"body\"] = (urwid.Columns(_columns, focus_column=1), None)", "None) else: self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=title), None) else: self.contents[\"body\"] =", "elif i == 11: _num = \"\\n = \" if", "urwid.SimpleFocusListWalker(line) urwid.connect_signal(walker, \"modified\", self.update_description) self.listbox = SelectableListBox(walker) header = urwid.LineBox(urwid.BoxAdapter(self.listbox,", "on_press=None, user_data=None, borders=True, disabled=False): self._label = ButtonLabel(\"\") if borders: cols", "= urwid.Columns([urwid.Text(map_commands, wrap=\"clip\"), urwid.Text(menu_commands, wrap=\"clip\")], dividechars = 1) super().__init__(parent, mind,", "# return self._disabled # @disabled.setter # def disabled(self, value): #", "if self.parent.parent.menu_width > 40: _name = c.name[0].upper() + c.name[1:] _left", "self.mind.key_map[\"inventory-menu\"] and self.menu_view: self.menu.update_body(\"Inventory\") else: self.map.handle_input(_input) def select_item(self, _input): if", "c.name[0].upper() + c.name[1:] _left += [f\"{_name:<12} \", (state, f\"{c.value:>2d}\"), f\"", "= pos _marker_box[(x+X_OFFSET)*(width+2)+y+Y_OFFSET] = (i.color, m) self.box[:] = [urwid.Columns([(width+2, urwid.Text(_marker_box)),", "cmd: if user_args: urwid.connect_signal(btn, \"click\", cmd, user_args = user_args) else:", "self.update_body() self.update_footer() class StatusFrame(UiFrame): def __init__(self, parent, mind): box =", "elif obj.is_equipment and not obj.is_equipped: _marker = [\"]\", (obj.color, f\"{obj.marker[0]}\"),", "else: self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=title), None) else: self.contents[\"body\"] = (self.active_body,", "\"\\n 0 \" elif i == 10: _num = \"\\n", "self.widget_size new_body = [b for b in 
self.parent.bodies][self.focus_position] self.parent.update_body(new_body) except:", "disabled(self): # return self._disabled # @disabled.setter # def disabled(self, value):", "= urwid.Text([(\"green\", f\"{'Enter:use/eqp':<14s}\"), (\"yellow\", \"Q:drop\")], align=\"center\") super().__init__(parent, mind, box, header=self.default_header,", "_frames} idx = -1 _title = _frames[idx] self.active_body = self.bodies[_title]", "def map_width(self): if self.menu_view: return self.mind.screen_size[0] - self.menu_width return self.mind.screen_size[0]", "not weapon: min_dmg, max_dmg = (1, 4) else: number, value", "+2\\nSneak attack, hide and trap\", \"Bard\": \"The noisy bard\\n\\nCharisma +1,", "= parent self.mind = mind urwid.AttrMap(self,\"frame\") super().__init__(*args, **kargs) @property def", "= parent def focus_next(self): try: self.focus_position += 1 if self.focus_position", "focus_map=focus_map) def create_button(label, cmd=None, align = \"center\", user_args = None,", "else: _equipment += [urwid.Text([f\"{_name}: \"])] _bonus = {} for eqp", "[] base = player.STR.mod weapon = player.equipment[\"main_hand\"] if not weapon:", "\"black\"), (\"other\", \"light blue\", \"black\"), (\"monster\", \"dark red\", \"black\"), (\"fatigued\",", "obj: _equipment += [urwid.Text([f\"{_name}: \", (obj.color, f\"{obj.name}\")])] else: _equipment +=", "= box.body self.default_header = urwid.Text(\"0/9-= to select\\n\\n\", align=\"center\") self.default_footer =", "self.active_body.handle_input(_input) # def exit(self): # self.disconnect() # self.mind.disconnect()#should use dispatch", "callback) def disconnect(self): pass def restart(self): pass def focus_next(self): pass", "header_height = self.parent.header_height + 2 tot_rows = self.mind.screen_size[1] return (tot_rows", "try: self.focus_position -= 1 except: pass class FrameColumns(urwid.Columns): def __init__(self,", "f\"\\n {i+1} \" elif i == 9: _num = \"\\n", "for s in CHARACTERISTICS: c = getattr(player, s) state =", "class GUI(UiFrame): def __init__(self, parent, mind): self.bodies = {\"Intro\" :", "set_text is invoked by Button.set_label ''' self.__super.set_text(label) self._cursor_position = len(label)", "time, os, copy from rpg_game.utils import log, mod, distance from", "footer=None, focus_part=\"body\") self.menu_view = True self.update_footer() self.header_widget = self.header.original_widget.box_widget self.footer_content_size", "self.bodies[_title] self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=_title), None) class InventoryFrame(UiFrame): def __init__(self,", "FOOTER_HEIGHT - 2 @property def menu_view(self): return self._menu_view @menu_view.setter def", "disconnect(self): pass def restart(self): pass def focus_next(self): pass def focus_previous(self):", "EXTRA_ENCUMBRANCE_MULTI*player.encumbrance: _right.append((\"red\", f\"{player.inventory.encumbrance:>2d}\")) elif player.inventory.encumbrance > player.encumbrance: _right.append((\"yellow\", f\"{player.inventory.encumbrance:>2d}\")) else:", "self.mind.key_map.items() if act.startswith(\"class_ability\")] for i, act in enumerate(self.player.class_actions): k =", "except IndexError: pass def focus_previous(self): try: self.focus_position -= 1 except", "self.update_description) self.listbox = SelectableListBox(walker) header = urwid.LineBox(urwid.BoxAdapter(self.listbox, len(self.choices)+1)) super().__init__(parent, mind,", "def body_height(self): return self.mind.screen_size[1] - self.header_height - FOOTER_HEIGHT - 2", "return # if self.disabled: # 
urwid.AttrMap(self, \"disabled\") # else: #", "urwid.Columns([urwid.Text(\"\")]) box = urwid.ListBox(urwid.SimpleListWalker([columns])) self.box = box.body self.default_header = urwid.Text(\"0/9-=", "self.player.inventory.selection self.contents[\"header\"] = (urwid.Text([(i.color, f\"{i.name}\\n\"), f\"{i.description}\\n\"], align=\"center\"), None) def update_footer(self):", "\"black\"), (\"monster\", \"dark red\", \"black\"), (\"fatigued\", \"dark red\", \"white\", \"standout\"),", "self.focus_position += 1 except IndexError: pass def focus_previous(self): try: self.focus_position", "def __init__(self, widget_list, focus_column=None, dividechars=0): super().__init__(widget_list, dividechars, focus_column) def focus_next(self):", "_marker = [\"]\", (obj.color, f\"{obj.marker[0]}\"), \"[\"] elif obj.is_consumable: _marker =", "ice wall\", \"Thief\": \"The sneaky thief\\n\\nDexterity +1, Intelligence +1, Hit", "\"Q:drop\")] self.contents[\"footer\"] = (urwid.Text(_text, align=\"center\"), None) def update_body(self): side =", "box.body self.default_header = urwid.Text(\"0/9-= to select\\n\\n\", align=\"center\") self.default_footer = urwid.Text([(\"green\",", "= (urwid.Text([(i.color, f\"{i.name}\\n\"), f\"{i.description}\\n\"], align=\"center\"), None) def update_footer(self): if not", "= len(widget_list) super(FrameColumns, self).__init__(widget_list, dividechars) self.parent = parent def focus_next(self):", "parent, mind): _frames = (\"Inventory\", \"Status\", \"Equipment\", \"Help\") self.bodies =", "self.default_header = urwid.Text(\"0/9-= to select\\n\\n\", align=\"center\") self.default_footer = urwid.Text([(\"green\", f\"{'Enter:use/eqp':<14s}\"),", "inv_btns.append(urwid.Text(\"\\nSET TERMINAL\\nTO 80X24\", align=\"center\")) self.contents[\"footer\"] = (SelectableColumns(inv_btns, dividechars=0), None) self.footer_content_size", "+= [\".\"] _marker_box += [\"║\\n\"] _marker_box += [\"╚\" +\"═\"*width+\"╝\"] if", "_size += 1 if obj.is_equipment and obj.is_equipped: _marker = [\"[\",", "self._menu_view = value _columns = [(self.map_width, self.map), (self.menu_width, self.menu)] self.contents[\"body\"]", "urwid.Text(menu_commands, wrap=\"clip\")], dividechars = 1) super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([columns]))) class SelectableListBox(urwid.ListBox):", "_equipment class HelpFrame(UiFrame): def __init__(self, parent, mind): self.mind = mind", "self.mind) self.update_body(\"Game\", no_title=True) class IntroFrame(UiFrame): def __init__(self, parent, mind): #", "align, user_args = user_args, borders=borders, disabled=disabled) return urwid.AttrMap(btn, attr_map, focus_map=focus_map)", "green\",\"white\",\"standout\"), (\"normal\",\"white\",\"black\"), (\"positive\",\"light green\",\"black\"), (\"negative\",\"dark red\",\"black\"), (\"white\",\"white\",\"black\"), (\"disabled\",\"dark red\",\"black\"), (\"red\",\"dark", "not self.menu_view elif _input == \"enter\" and self.player.inventory.selection: self.player.use_quick_item(self.player.inventory.selection) self.update_footer()", "= MenuFrame(self, mind) super().__init__(parent, mind, urwid.Columns([(self.map_width, self.map), (self.menu_width, self.menu)], focus_column=1),", "\"] else: _marker = [f\" \"] if i < 9:", "self.player.inventory.selection: self.contents[\"header\"] = (self.default_header, None) else: i = self.player.inventory.selection self.contents[\"header\"]", "None) else: i = self.player.inventory.selection _text = [] if not", "\"standout\"), (\"top\",\"white\",\"black\"), 
(\"frame\",\"white\",\"white\"), (\"player\", \"light green\", \"black\"), (\"other\", \"light blue\",", "inv_btns = [] for i, obj in self.player.inventory.content.items(): if obj:", "'black', 'white', \"standout\"), (\"top\",\"white\",\"black\"), (\"frame\",\"white\",\"white\"), (\"player\", \"light green\", \"black\"), (\"other\",", "*args): self.mind.get_GUI_event(event_type, *args) def register_event(self, event_type, callback): self.mind.register_GUI_event(event_type, callback) def", "self.active_body.on_update() def handle_input(self, _input): # print(\"HANDLING\", _input) self.active_body.handle_input(_input) # def", "update_header(self): if not self.player.inventory.selection: self.contents[\"header\"] = (self.default_header, None) else: i", "0 @property def header_height(self): return MIN_HEADER_HEIGHT#max(MIN_HEADER_HEIGHT, self.mind.screen_size[1]//8) @property def menu_width(self):", "elif _input == \"=\": _input = 11 self.player.inventory.selection = self.player.inventory.get(_input)", "_name = _name[0].upper() + _name[1:] if obj: _equipment += [urwid.Text([f\"{_name}:", "class ButtonLabel(urwid.SelectableIcon): def set_text(self, label): ''' set_text is invoked by", "plain strings and variable width - any string, including an", "(\"reversed\", \"standout\", \"\"), (\"common\",\"white\",\"black\"), (\"common_line\",\"black\",\"white\",\"standout\"), (\"uncommon\",\"dark cyan\",\"black\"), (\"uncommon_line\",\"dark cyan\",\"white\",\"standout\"), (\"rare\",\"yellow\",\"black\"),", ": IntroFrame(self, mind)} self.active_body = self.bodies[\"Intro\"] super().__init__(parent, mind, self.active_body) def", "range(height): _marker_box += [\"║\"] for y in range(width): _marker_box +=", "+= f\"{b}:{val} \" _top += \"\\n\" self.box[:] = [urwid.Text(_top)] +", "self.mind.connections[self.mind.avatar.uuid] else: return None def handle_input(self, _input): pass def on_update(self):", "label # @property # def disabled(self): # return self._disabled #", "(\"red\",\"dark red\",\"black\"), (\"green\",\"light green\",\"black\"), (\"yellow\",\"yellow\",\"black\"), (\"brown\",\"brown\",\"black\"), (\"white_line\",\"black\",\"white\", \"standout\"), (\"red_line\",\"dark red\",\"white\",", "_right.append(f\"Speed {player.movement_speed}\\n\") _right.append(f\"Monsterized {player.MP:<2d}\\n\") self.box[:] = [urwid.Text(_top), urwid.Columns([urwid.Text(_left), urwid.Text(_right)], dividechars", "urwid.Text(self.button_right))], dividechars=1) else: cols = urwid.Columns([self._label], dividechars=0) super(urwid.Button, self).__init__(cols) self.disabled", "(urwid.LineBox(self.active_body, title=title), None) else: self.contents[\"body\"] = (self.active_body, None) class GUI(UiFrame):", "= min(self.listbox.focus_position, len(self.choices)-1) choice = self.choices[index] self.contents[\"body\"] = (urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])), None)", "== \"-\": _input = 10 elif _input == \"=\": _input", "new_body = [b for b in self.parent.bodies][self.focus_position] self.parent.update_body(new_body) except: pass", "cyan\",\"black\"), (\"uncommon_line\",\"dark cyan\",\"white\",\"standout\"), (\"rare\",\"yellow\",\"black\"), (\"rare_line\",\"yellow\",\"white\",\"standout\"), (\"unique\",\"light magenta\",\"black\"), (\"unique_line\",\"light magenta\",\"white\",\"standout\"), (\"set\",\"light", "self.mind.screen_size != (80, 24): self.update_footer() self.map.on_update() if self.menu_view: self.menu.on_update() def", "self.choices[index] self.contents[\"body\"] = 
(urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])), None) class GameFrame(UiFrame): def __init__(self, parent,", "\", (obj.color, f\"{obj.name}\")])] else: _equipment += [urwid.Text([f\"{_name}: \"])] _bonus =", "def __init__(self, parent, mind): # urwid.Padding(urwid.BigText(('top', \"Hack\\'n\\'SSH\"), urwid.HalfBlock5x4Font())), self.choices =", "\"[\" button_right = \"]\" def __init__(self, label, on_press=None, user_data=None, borders=True,", "create_button(label, cmd=None, align = \"center\", user_args = None, borders=True, disabled=False):", "f\"{i.name}\\n\"), f\"{i.description}\\n\"], align=\"center\"), None) def update_footer(self): if not self.player.inventory.selection: self.contents[\"footer\"]", "green\",\"black\"), (\"yellow\",\"yellow\",\"black\"), (\"brown\",\"brown\",\"black\"), (\"white_line\",\"black\",\"white\", \"standout\"), (\"red_line\",\"dark red\",\"white\", \"standout\"), (\"green_line\",\"light green\",\"white\",", "value): # if self._disabled == value: # return # if", "\"0\": s_input = 9 elif _input == \"-\": _input =", "self.menu_view: self.menu.update_body(\"Status\") elif _input == self.mind.key_map[\"help-menu\"] and self.menu_view: self.menu.update_body(\"Help\") elif", "(urwid.LineBox(self.active_body), None) else: self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=title), None) else: self.contents[\"body\"]", "= (urwid.LineBox(self.active_body, title=title), None) else: self.contents[\"body\"] = (self.active_body, None) class", "\"Wizard\": \"The opportune wizard\\n\\nIntelligence +1\\n Fireball, teleport and ice wall\",", "[ (\"line\", 'black', 'white', \"standout\"), (\"top\",\"white\",\"black\"), (\"frame\",\"white\",\"white\"), (\"player\", \"light green\",", "= urwid.Text(\"║\") width = 8 height = 6 _marker_box =", "f\"{obj.marker[0]}\"), \")\"] else: _marker = [f\" {obj.marker[0]} \"] else: _marker", "(\"yellow\", \"Q:drop\")], align=\"center\") super().__init__(parent, mind, box, header=self.default_header, footer=self.default_footer) @property def", "if self.focus_position >= self.widget_size: self.focus_position -= self.widget_size new_body = [b", "box) def on_update(self): player = self.player _equipment = [] for", "make button_left and button_right plain strings and variable width -", "line in _map] h = max(0, x - self.parent.body_height//2) if", "btn = MyButton(label, borders=borders, disabled=disabled) btn._label.align = align if cmd:", "parent, mind): self.mind = mind _header = urwid.LineBox(urwid.BoxAdapter(SelectableListBox(urwid.SimpleFocusListWalker([urwid.Text(\"\")])), self.header_height)) self._menu_view", "= not self.menu_view elif _input == \"enter\" and self.player.inventory.selection: self.player.use_quick_item(self.player.inventory.selection)", "weapon = player.equipment[\"main_hand\"] if not weapon: min_dmg, max_dmg = (1,", "''' set_text is invoked by Button.set_label ''' self.__super.set_text(label) self._cursor_position =", "class_action_keys = [k for k, act in self.mind.key_map.items() if act.startswith(\"class_ability\")]", "_equipment += [urwid.Text([f\"{_name}: \"])] _bonus = {} for eqp in", "self.mind.register_GUI_event(event_type, callback) def disconnect(self): pass def restart(self): pass def focus_next(self):", "f\": self.debug_view = not self.debug_view elif _input == \"ctrl v\":", "\"light blue\", \"black\"), (\"monster\", \"dark red\", \"black\"), (\"fatigued\", \"dark red\",", "ButtonLabel(urwid.SelectableIcon): def set_text(self, label): ''' set_text is invoked by 
Button.set_label", "copy from rpg_game.utils import log, mod, distance from rpg_game.constants import", "_input): pass def on_update(self): pass def dispatch_event(self, event_type, *args): self.mind.get_GUI_event(event_type,", "f\"{'Cannot equip':<14s}\")] elif not i.is_equipped: _text += [(\"green\", f\"{'Enter:equip':<14s}\")] elif", "self.debug_view = not self.debug_view elif _input == \"ctrl v\": self.layer_view", "on_press, user_data) self.set_label(label) self.lllavel = label # @property # def", "(\"yellow\",\"yellow\",\"black\"), (\"brown\",\"brown\",\"black\"), (\"white_line\",\"black\",\"white\", \"standout\"), (\"red_line\",\"dark red\",\"white\", \"standout\"), (\"green_line\",\"light green\",\"white\", \"standout\"),", "self.default_footer = urwid.Text([(\"green\", f\"{'Enter:use/eqp':<14s}\"), (\"yellow\", \"Q:drop\")], align=\"center\") super().__init__(parent, mind, box,", "val = player.full_eqp_bonus(eqp, b) if b not in _bonus: _bonus[b]", "self.menu_view: return min(MAX_MENU_WIDTH, (3*self.mind.screen_size[0])//7) return 0 @property def map_width(self): if", "super(urwid.Button, self).__init__(cols) self.disabled = disabled if on_press: urwid.connect_signal(self, 'click', on_press,", "trap\", \"Bard\": \"The noisy bard\\n\\nCharisma +1, Dexterity +1, Intelligence +1,", "and self.menu_view: self.menu.update_body(\"Help\") elif _input == self.mind.key_map[\"equipment-menu\"] and self.menu_view: self.menu.update_body(\"Equipment\")", "points +4\\nCharge and parry\", \"Dwarf\": \"The short dwarf\\n\\nStrength +1, Constitution", "urwid.LineBox(urwid.BoxAdapter(SelectableListBox(urwid.SimpleFocusListWalker([urwid.Text(\"\")])), self.header_height)) self._menu_view = True self.map = MapFrame(self, mind) self.menu", "_input == \"enter\" and self.player.inventory.selection: self.player.use_quick_item(self.player.inventory.selection) self.update_footer() elif _input ==", "obj.is_equipment and obj.is_equipped: _marker = [\"[\", (obj.color, f\"{obj.marker[0]}\"), \"]\"] elif", "= player.position _top = f\"{player.name:<12s} {player.game_class.name:<10s}\\nLev:{player.level:<2d} Exp:{player.exp:<4d} {player.location.name}@({x},{y})\\n\" _left =", "0 inv_btns = [] for i, obj in self.player.inventory.content.items(): if", "if not self.player.inventory.selection: self.contents[\"footer\"] = (self.default_footer, None) else: i =", "if user_args: urwid.connect_signal(btn, \"click\", cmd, user_args = user_args) else: urwid.connect_signal(btn,", "i = self.player.inventory.selection _text = [] _text += [i.eq_description, f\"\\nEncumbrance:{i.encumbrance}\\n\"]", "self.contents[\"footer\"] = (SelectableColumns(inv_btns, dividechars=0), None) self.footer_content_size = _size def on_update(self):", "< 0) + int(c.temp_bonus > 0)] if self.parent.parent.menu_width > 40:", "self.__super.set_text(label) self._cursor_position = len(label) + 1 class MyButton(urwid.Button): ''' -", "len(self.button_left), urwid.Text(self.button_left)), self._label, ('fixed', len(self.button_right), urwid.Text(self.button_right))], dividechars=1) else: cols =", "return urwid.Text(\"\") i = self.player.inventory.selection _text = [] _text +=", "self.player.inventory.selection = self.player.inventory.get(_input) def update_header(self): widgets = [] for p", "\"click\", cmd, user_args = user_args) else: urwid.connect_signal(btn, \"click\", cmd) return", "thief\\n\\nDexterity +1, Intelligence +1, Hit points +2\\nSneak attack, hide and", "= \"center\", user_args = None, borders=True, disabled=False): btn = MyButton(label,", 
"0 \" elif i == 10: _num = \"\\n -", "f\"a:attack\\n\", f\"q:pickup\\n\"] class_action_keys = [k for k, act in self.mind.key_map.items()", "self.layer_view = -1 elif _input in self.mind.key_map: _action = self.mind.key_map[_input]", "_input.isnumeric() or _input in (\"-\", \"=\"): self.select_item(_input) self.update_footer() elif _input", "[\"║\"] for y in range(width): _marker_box += [\".\"] _marker_box +=", "def update_body(self, _title): self.active_body = self.bodies[_title] self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=_title),", "self.mind.master.players: return self.mind.master.players[self.mind.avatar.uuid] else: return None @property def connection(self): if", "box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.box = box.body super().__init__(parent, mind, box) def", "else: _right.append((\"white\", f\"{player.inventory.encumbrance:>2d}\")) _right.append(f\"/{player.encumbrance:<2d}\\n\") _right.append(f\"Speed {player.movement_speed}\\n\") _right.append(f\"Monsterized {player.MP:<2d}\\n\") self.box[:] =", "= self.layer_view + 1 if self.layer_view > 2: self.layer_view =", "b) if b not in _bonus: _bonus[b] = val else:", "= player.STR.mod weapon = player.equipment[\"main_hand\"] if not weapon: min_dmg, max_dmg", "elif i == 9: _num = \"\\n 0 \" elif", "focus_previous(self): pass def update_body(self, title, no_title=False, boxed=False): self.active_body = self.bodies[title]", "self.focus_position -= 1 except IndexError: pass class SelectableColumns(urwid.Columns): def __init__(self,", "def handle_input(self, _input): # print(\"HANDLING\", _input) self.active_body.handle_input(_input) # def exit(self):", "urwid.Columns([(self.map_width, self.map), (self.menu_width, self.menu)], focus_column=1), header=_header, footer=None, focus_part=\"body\") self.menu_view =", "self.menu_view: self.menu.update_body(\"Help\") elif _input == self.mind.key_map[\"equipment-menu\"] and self.menu_view: self.menu.update_body(\"Equipment\") elif", "urwid.Text([(\"green\", f\"{'Enter:use/eqp':<14s}\"), (\"yellow\", \"Q:drop\")], align=\"center\") super().__init__(parent, mind, box, header=self.default_header, footer=self.default_footer)", "_marker = [f\" {obj.marker[0]} \"] else: _marker = [f\" \"]", "otherwise, we leave Button behaviour unchanged ''' button_left = \"[\"", "self.menu_view = not self.menu_view elif _input == \"enter\" and self.player.inventory.selection:", "def focus_previous(self): try: self.focus_position -= 1 except: pass class FrameColumns(urwid.Columns):", "update_header(self): widgets = [] for p in self.header_list: widgets.append(urwid.AttrMap(urwid.AttrMap(urwid.Text(p.status, wrap=\"clip\"),", "super().__init__(parent, mind, box) def on_update(self): player = self.player x, y,", "__init__(self, body): super(SelectableListBox, self).__init__(body) def focus_next(self): try: self.focus_position += 1", "connection(self): if self.mind.avatar.uuid in self.mind.connections: return self.mind.connections[self.mind.avatar.uuid] else: return None", "self.mind = mind _header = urwid.LineBox(urwid.BoxAdapter(SelectableListBox(urwid.SimpleFocusListWalker([urwid.Text(\"\")])), self.header_height)) self._menu_view = True", "def update_body(self, title, no_title=False, boxed=False): self.active_body = self.bodies[title] if boxed:", "choice = self.choices[index] self.contents[\"body\"] = (urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])), None) class GameFrame(UiFrame): def", "magenta\",\"black\"), (\"unique_line\",\"light 
magenta\",\"white\",\"standout\"), (\"set\",\"light green\",\"black\"), (\"set_line\",\"light green\",\"white\",\"standout\"), (\"normal\",\"white\",\"black\"), (\"positive\",\"light green\",\"black\"),", "urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.box = box.body super().__init__(parent, mind, box) def on_update(self): player", "align=\"center\")) self.contents[\"footer\"] = (SelectableColumns(inv_btns, dividechars=0), None) self.footer_content_size = _size def", "user_data) self.set_label(label) self.lllavel = label # @property # def disabled(self):", "super().__init__(parent, mind, box, header=self.default_header, footer=self.default_footer) @property def selection_data(self): if not", "boxed: if no_title: self.contents[\"body\"] = (urwid.LineBox(self.active_body), None) else: self.contents[\"body\"] =", "dividechars = 1) ] class EquipmentFrame(UiFrame): def __init__(self, parent, mind):", "f\"\\nEncumbrance:{i.encumbrance}\\n\"] return urwid.Text(_text) def update_header(self): if not self.player.inventory.selection: self.contents[\"header\"] =", "_top += f\"Reduction:{val} \" else: _top += f\"{b}:{val} \" _top", "(\"fatigued\", \"dark red\", \"white\", \"standout\"), (\"reversed\", \"standout\", \"\"), (\"common\",\"white\",\"black\"), (\"common_line\",\"black\",\"white\",\"standout\"),", "def __init__(self, parent, mind): _frames = (\"Inventory\", \"Status\", \"Equipment\", \"Help\")", "mind, *args, **kargs): self.parent = parent self.mind = mind urwid.AttrMap(self,\"frame\")", "return self.mind.master.players[self.mind.avatar.uuid] else: return None @property def connection(self): if self.mind.avatar.uuid", "obj.is_equipped: _marker = [\"[\", (obj.color, f\"{obj.marker[0]}\"), \"]\"] elif obj.is_equipment and", "= self.player.inventory.selection _text = [] _text += [i.eq_description, f\"\\nEncumbrance:{i.encumbrance}\\n\"] return", "widgets = [] for p in self.header_list: widgets.append(urwid.AttrMap(urwid.AttrMap(urwid.Text(p.status, wrap=\"clip\"), {self.player.id:\"player\"}),", "self.contents[\"footer\"] = (urwid.Text(_text, align=\"center\"), None) def update_body(self): side = urwid.Text(\"║\")", "act.startswith(\"class_ability\")] for i, act in enumerate(self.player.class_actions): k = class_action_keys[i] map_commands.append(f\"{k}:{self.player.class_actions[act].description.lower()}\\n\")", "self._cursor_position = len(label) + 1 class MyButton(urwid.Button): ''' - override", "else: _marker = [f\" \"] if i < 9: _num", "HelpFrame(UiFrame): def __init__(self, parent, mind): self.mind = mind map_commands =", "\") if player.inventory.encumbrance == EXTRA_ENCUMBRANCE_MULTI*player.encumbrance: _right.append((\"red\", f\"{player.inventory.encumbrance:>2d}\")) elif player.inventory.encumbrance >", "distance(self.player.position, ent.position)) def update_footer(self): _size = 0 inv_btns = []", "[] if not i.requisites(self.player): _text += [(\"red\", f\"{'Cannot equip':<14s}\")] elif", "# if self._disabled == value: # return # if self.disabled:", "_bonus = {} for eqp in player.equipment_set: for b in", "(\"-\", \"=\"): self.select_item(_input) self.update_footer() elif _input == self.mind.key_map[\"status-menu\"] and self.menu_view:", "> 0: _input = int(_input)-1 elif _input == \"0\": s_input", "= c.name[0].upper() + c.name[1:] _left += [f\"{_name:<12} \", (state, f\"{c.value:>2d}\"),", "def menu_view(self): return self._menu_view @menu_view.setter def menu_view(self, value): self._menu_view =", "value _columns = [(self.map_width, self.map), (self.menu_width, 
self.menu)] self.contents[\"body\"] = (urwid.Columns(_columns,", "select_item(self, _input): if _input.isnumeric() and int(_input) > 0: _input =", "class SelectableListBox(urwid.ListBox): def __init__(self, body): super(SelectableListBox, self).__init__(body) def focus_next(self): try:", "footer=self.default_footer) @property def selection_data(self): if not self.player.inventory.selection: return urwid.Text(\"\") i", "if self.player.inventory.selection: i = self.player.inventory.selection X_OFFSET = 2 Y_OFFSET =", "[\"╚\" +\"═\"*width+\"╝\"] if self.player.inventory.selection: i = self.player.inventory.selection X_OFFSET = 2", "__init__(self, label, on_press=None, user_data=None, borders=True, disabled=False): self._label = ButtonLabel(\"\") if", "self.active_body = self.bodies[_title] self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=_title), None) class InventoryFrame(UiFrame):", "1) ] class EquipmentFrame(UiFrame): def __init__(self, parent, mind): box =", "= [] for t, obj in player.equipment.items(): _name = t.replace(\"_\",", "None) def update_body(self): side = urwid.Text(\"║\") width = 8 height", "+= f\"Reduction:{val} \" else: _top += f\"{b}:{val} \" _top +=", "obj: _size += 1 if obj.is_equipment and obj.is_equipped: _marker =", "x, y, z = self.player.position w = max(0, y -", "self.player.inventory.selection X_OFFSET = 2 Y_OFFSET = 4 for m, pos", "\"The sneaky thief\\n\\nDexterity +1, Intelligence +1, Hit points +2\\nSneak attack,", "_input.isnumeric() and int(_input) > 0: _input = int(_input)-1 elif _input", "super().__init__(parent, mind, urwid.LineBox(self.active_body, title=_title)) def on_update(self): self.active_body.on_update() def selectable(self): return", "_header = urwid.LineBox(urwid.BoxAdapter(SelectableListBox(urwid.SimpleFocusListWalker([urwid.Text(\"\")])), self.header_height)) self._menu_view = True self.map = MapFrame(self,", "def select_class(self, button): index = min(self.listbox.focus_position, len(self.choices)-1) choice = self.choices[index]", "\")\"] else: _marker = [f\" {obj.marker[0]} \"] else: _marker =", "super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[\"Warrior\"])])), header=header, focus_part=\"header\") def select_class(self, button): index =", "+1, Constitution +1, Hit points +6\\nDemolish and parry\", \"Wizard\": \"The", "\"Bard\") self.descriptions = {\"Warrior\": \"The mighty warrior\\n\\nStrength +1, Hit points", "self.debug_view elif _input == \"ctrl v\": self.layer_view = self.layer_view +", "act in enumerate(self.player.class_actions): k = class_action_keys[i] map_commands.append(f\"{k}:{self.player.class_actions[act].description.lower()}\\n\") menu_commands = [\"Menu", "len(label) + 1 class MyButton(urwid.Button): ''' - override __init__ to", "if obj: _size += 1 if obj.is_equipment and obj.is_equipped: _marker", "\"standout\"), (\"reversed\", \"standout\", \"\"), (\"common\",\"white\",\"black\"), (\"common_line\",\"black\",\"white\",\"standout\"), (\"uncommon\",\"dark cyan\",\"black\"), (\"uncommon_line\",\"dark cyan\",\"white\",\"standout\"),", "1, number * value) min_dmg = max(1, base + min_dmg)", "self.mind = mind map_commands = [\"Map commands\\n\\n\", f\"←→↑↓:move\\n\", f\"shift+←→↑↓:dash\\n\", f\"a:attack\\n\",", "borders=borders, disabled=disabled) return urwid.AttrMap(btn, attr_map, focus_map=focus_map) def create_button(label, cmd=None, align", "(\"top\",\"white\",\"black\"), (\"frame\",\"white\",\"white\"), (\"player\", \"light green\", \"black\"), (\"other\", 
\"light blue\", \"black\"),", "len(visible_map): visible_map = visible_map[len(visible_map)-self.parent.body_height:] else: visible_map = visible_map[h:h+self.parent.body_height] map_with_attr =", "Button.set_label ''' self.__super.set_text(label) self._cursor_position = len(label) + 1 class MyButton(urwid.Button):", "= self.bodies[title] if boxed: if no_title: self.contents[\"body\"] = (urwid.LineBox(self.active_body), None)", "self.on_update() @property def visible_range(self): header_height = self.parent.header_height + 2 tot_rows", "__init__(self, parent, mind): _frames = (\"Inventory\", \"Status\", \"Equipment\", \"Help\") self.bodies", "_marker_box += [\".\"] _marker_box += [\"║\\n\"] _marker_box += [\"╚\" +\"═\"*width+\"╝\"]", "class GameFrame(UiFrame): def __init__(self, parent, mind): self.mind = mind _header", "= align, user_args = user_args, borders=borders, disabled=disabled) return urwid.AttrMap(btn, attr_map,", "points +2\\nSneak attack, hide and trap\", \"Bard\": \"The noisy bard\\n\\nCharisma", "on_update(self): if self.layer_view == -1: _map = copy.deepcopy(self.player.location.map) else: _map", "self.layer_view = self.layer_view + 1 if self.layer_view > 2: self.layer_view", "green\",\"white\", \"standout\"), (\"yellow_line\",\"yellow\",\"white\", \"standout\"), (\"cyan\",\"light cyan\",\"black\"), (\"cyan_line\",\"light cyan\",\"white\", \"standout\"), (\"name\",\"white\",\"black\"),", "= f\"\\n {i+1} \" elif i == 9: _num =", "disabled=False): btn = create_button(label, cmd=cmd, align = align, user_args =", "# def disabled(self): # return self._disabled # @disabled.setter # def", "else: _marker = [f\" {obj.marker[0]} \"] else: _marker = [f\"", "k, act in self.mind.key_map.items() if act.startswith(\"class_ability\")] for i, act in", "self.parent.start_game_frame() def update_description(self): index = min(self.listbox.focus_position, len(self.choices)-1) choice = self.choices[index]", "disabled=disabled) return urwid.AttrMap(btn, attr_map, focus_map=focus_map) def create_button(label, cmd=None, align =", "= class_action_keys[i] map_commands.append(f\"{k}:{self.player.class_actions[act].description.lower()}\\n\") menu_commands = [\"Menu commands\\n\\n\", f\"tab:open/close\\n\",f\"0/9-=:select item\\n\", f\"ctrl+p:respawn\\n\",", "= [\"normal\", \"positive\", \"negative\"][-int(c.temp_bonus < 0) + int(c.temp_bonus > 0)]", "= (SelectableColumns(inv_btns, dividechars=0), None) self.footer_content_size = _size def on_update(self): self.update_header()", "else: _marker += [(\"top\", _num)] btn = urwid.Text(_marker, align=\"center\") inv_btns.append((5,", "_text += [(\"red\", f\"{'Cannot equip':<14s}\")] elif not i.is_equipped: _text +=", "self.player _equipment = [] for t, obj in player.equipment.items(): _name", "\"dark red\", \"black\"), (\"fatigued\", \"dark red\", \"white\", \"standout\"), (\"reversed\", \"standout\",", "if obj.is_equipment and obj.is_equipped: _marker = [\"[\", (obj.color, f\"{obj.marker[0]}\"), \"]\"]", "return min(MAX_MENU_WIDTH, (3*self.mind.screen_size[0])//7) return 0 @property def map_width(self): if self.menu_view:", "height = 6 _marker_box = [\"╔\" +\"═\"*width+\"╗\\n\"] for x in", "# @property # def disabled(self): # return self._disabled # @disabled.setter", "\"Help\") self.bodies = {b : globals()[f\"{b}Frame\"](self, mind) for b in", "Button behaviour unchanged ''' button_left = \"[\" button_right = \"]\"", "disabled=False): btn = MyButton(label, borders=borders, disabled=disabled) btn._label.align = align if", "+1, Dexterity +1, Intelligence 
+1, Hit points +2\\nSing and summon\"}", "_text = [] if not i.requisites(self.player): _text += [(\"red\", f\"{'Cannot", "+1, Hit points +2\\nSing and summon\"} line = [] for", "super().__init__(parent, mind, box) def on_update(self): player = self.player _equipment =", "parent, mind): columns = urwid.Columns([urwid.Text(\"\")]) box = urwid.ListBox(urwid.SimpleListWalker([columns])) self.box =", "dividechars=0): super().__init__(widget_list, dividechars, focus_column) def focus_next(self): try: self.focus_position += 1", "= [urwid.AttrMap(urwid.AttrMap(urwid.Text(line, wrap=\"clip\"), {self.player.id:\"player\"}), {p.id:\"other\" for i, p in self.mind.master.players.items()})", "self.player.inventory.selection: self.player.actions[\"drop\"].use(self.player, obj=self.player.inventory.selection) self.update_footer() elif _input.isnumeric() or _input in (\"-\",", "self.mind.master.new_player(self.mind.avatar.uuid, choice) self.parent.start_game_frame() def update_description(self): index = min(self.listbox.focus_position, len(self.choices)-1) choice", "36: _name = c.name[0].upper() + c.name[1:6] _left += [f\"{_name:<6} \",", "attr_map=None, focus_map = \"line\", align = \"center\", user_args = None,", "self.mind.master.players[self.mind.avatar.uuid] else: return None @property def connection(self): if self.mind.avatar.uuid in", "\"black\"), (\"fatigued\", \"dark red\", \"white\", \"standout\"), (\"reversed\", \"standout\", \"\"), (\"common\",\"white\",\"black\"),", "self.descriptions = {\"Warrior\": \"The mighty warrior\\n\\nStrength +1, Hit points +4\\nCharge", "mind) self.menu = MenuFrame(self, mind) super().__init__(parent, mind, urwid.Columns([(self.map_width, self.map), (self.menu_width,", "None) else: i = self.player.inventory.selection self.contents[\"header\"] = (urwid.Text([(i.color, f\"{i.name}\\n\"), f\"{i.description}\\n\"],", "{min_dmg:>3d}-{max_dmg:<3d}\\n\") _right.append(f\"Reduction {player.dmg_reduction:<3d}\\n\") _right.append(f\"Encumb \") if player.inventory.encumbrance == EXTRA_ENCUMBRANCE_MULTI*player.encumbrance: _right.append((\"red\",", "except IndexError: pass class SelectableColumns(urwid.Columns): def __init__(self, widget_list, focus_column=None, dividechars=0):", "for eqp in player.equipment_set: for b in set(list(eqp.bonus.keys()) + list(eqp.set_bonus.keys())):", "obj.is_equipped: _marker = [\"]\", (obj.color, f\"{obj.marker[0]}\"), \"[\"] elif obj.is_consumable: _marker", "handle_input(self, _input): if _input == \"ctrl f\": self.debug_view = not", "z = self.player.position w = max(0, y - self.parent.body_width//3) visible_map", "y, z = player.position _top = f\"{player.name:<12s} {player.game_class.name:<10s}\\nLev:{player.level:<2d} Exp:{player.exp:<4d} {player.location.name}@({x},{y})\\n\"", "= weapon.dmg min_dmg, max_dmg = (number * 1, number *", "IntroFrame(self, mind)} self.active_body = self.bodies[\"Intro\"] super().__init__(parent, mind, self.active_body) def on_update(self):", "widget_list, dividechars=0): self.widget_size = len(widget_list) super(FrameColumns, self).__init__(widget_list, dividechars) self.parent =", "@property def menu_view(self): return self._menu_view @menu_view.setter def menu_view(self, value): self._menu_view", "self.layer_view > 2: self.layer_view = -1 elif _input in self.mind.key_map:", "\"positive\", \"negative\"][-int(c.temp_bonus < 0) + int(c.temp_bonus > 0)] if self.parent.parent.menu_width", "self.update_footer() class StatusFrame(UiFrame): def __init__(self, parent, mind): box = 
urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")]))", "= self.player _equipment = [] for t, obj in player.equipment.items():", "[\"Map commands\\n\\n\", f\"←→↑↓:move\\n\", f\"shift+←→↑↓:dash\\n\", f\"a:attack\\n\", f\"q:pickup\\n\"] class_action_keys = [k for", "48 FOOTER_HEIGHT = 4 PALETTE = [ (\"line\", 'black', 'white',", "in _frames} idx = -1 _title = _frames[idx] self.active_body =", "__init__(self, parent, mind): self.bodies = {\"Intro\" : IntroFrame(self, mind)} self.active_body", "focus_column=None, dividechars=0): super().__init__(widget_list, dividechars, focus_column) def focus_next(self): try: self.focus_position +=", "0 @property def map_width(self): if self.menu_view: return self.mind.screen_size[0] - self.menu_width", "\" elif i == 10: _num = \"\\n - \"", "@property def body_height(self): return self.mind.screen_size[1] - self.header_height - FOOTER_HEIGHT -", "self.map.handle_input(_input) def select_item(self, _input): if _input.isnumeric() and int(_input) > 0:", "self.parent.bodies][self.focus_position] self.parent.update_body(new_body) except: pass class ButtonLabel(urwid.SelectableIcon): def set_text(self, label): '''", "header=header, focus_part=\"header\") def select_class(self, button): index = min(self.listbox.focus_position, len(self.choices)-1) choice", "urwid.connect_signal(self, 'click', on_press, user_data) self.set_label(label) self.lllavel = label # @property", "def on_update(self): player = self.player x, y, z = player.position", "encoding: utf-8 import urwid import time, os, copy from rpg_game.utils", "if borders: cols = urwid.Columns([ ('fixed', len(self.button_left), urwid.Text(self.button_left)), self._label, ('fixed',", "i == 10: _num = \"\\n - \" elif i", "+= [(\"green\", f\"{'Enter:unequip':<14s}\")] elif i.is_consumable: _text += [(\"green\", f\"{'Enter:use':<14s}\")] _text", "== self.mind.key_map[\"inventory-menu\"] and self.menu_view: self.menu.update_body(\"Inventory\") else: self.map.handle_input(_input) def select_item(self, _input):", "= 1) super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([columns]))) class SelectableListBox(urwid.ListBox): def __init__(self, body):", "_text += [(\"green\", f\"{'Enter:use':<14s}\")] _text += [(\"yellow\", \"Q:drop\")] self.contents[\"footer\"] =", "blue\", \"black\"), (\"monster\", \"dark red\", \"black\"), (\"fatigued\", \"dark red\", \"white\",", "= getattr(player, s) state = [\"normal\", \"positive\", \"negative\"][-int(c.temp_bonus < 0)", "we leave Button behaviour unchanged ''' button_left = \"[\" button_right", "super().__init__(widget_list, dividechars, focus_column) def focus_next(self): try: self.focus_position += 1 except:", "\"standout\"), (\"name\",\"white\",\"black\"), ] class UiFrame(urwid.Frame): def __init__(self, parent, mind, *args,", "_bonus[b] += val _top = \"\" for b, val in", "not obj.is_equipped: _marker = [\"]\", (obj.color, f\"{obj.marker[0]}\"), \"[\"] elif obj.is_consumable:", "walker = urwid.SimpleFocusListWalker(line) urwid.connect_signal(walker, \"modified\", self.update_description) self.listbox = SelectableListBox(walker) header", "+= [f\"{s:<3} \", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] _right = []", "9: _num = \"\\n 0 \" elif i == 10:", "enumerate(self.player.class_actions): k = class_action_keys[i] map_commands.append(f\"{k}:{self.player.class_actions[act].description.lower()}\\n\") menu_commands = [\"Menu commands\\n\\n\", f\"tab:open/close\\n\",f\"0/9-=:select", "parent, mind, *args, **kargs): self.parent = parent self.mind = mind", 
"== self.mind.key_map[\"help-menu\"] and self.menu_view: self.menu.update_body(\"Help\") elif _input == self.mind.key_map[\"equipment-menu\"] and", "= self.choices[index] self.contents[\"body\"] = (urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])), None) class GameFrame(UiFrame): def __init__(self,", "urwid.Columns([urwid.Text(map_commands, wrap=\"clip\"), urwid.Text(menu_commands, wrap=\"clip\")], dividechars = 1) super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([columns])))", "disabled(self, value): # if self._disabled == value: # return #", "self.set_label(label) self.lllavel = label # @property # def disabled(self): #", "def update_header(self): if not self.player.inventory.selection: self.contents[\"header\"] = (self.default_header, None) else:", "update_body(self, title, no_title=False, boxed=False): self.active_body = self.bodies[title] if boxed: if", "_num)] else: _marker += [(\"top\", _num)] btn = urwid.Text(_marker, align=\"center\")", "urwid.LineBox(urwid.BoxAdapter(self.listbox, len(self.choices)+1)) super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[\"Warrior\"])])), header=header, focus_part=\"header\") def select_class(self, button):", "index = min(self.listbox.focus_position, len(self.choices)-1) choice = self.choices[index] self.contents[\"body\"] = (urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])),", "self.bodies = {b : globals()[f\"{b}Frame\"](self, mind) for b in _frames}", "(\"Inventory\", \"Status\", \"Equipment\", \"Help\") self.bodies = {b : globals()[f\"{b}Frame\"](self, mind)", "\"dark red\", \"white\", \"standout\"), (\"reversed\", \"standout\", \"\"), (\"common\",\"white\",\"black\"), (\"common_line\",\"black\",\"white\",\"standout\"), (\"uncommon\",\"dark", "\"Status\", \"Equipment\", \"Help\") self.bodies = {b : globals()[f\"{b}Frame\"](self, mind) for", "mind)} self.active_body = self.bodies[\"Intro\"] super().__init__(parent, mind, self.active_body) def on_update(self): self.active_body.on_update()", "# encoding: utf-8 import urwid import time, os, copy from", "instead of urwid.SelectableIcon - make button_left and button_right plain strings", "-1 _title = _frames[idx] self.active_body = self.bodies[_title] super().__init__(parent, mind, urwid.LineBox(self.active_body,", "+= self.widget_size new_body = [b for b in self.parent.bodies][self.focus_position] self.parent.update_body(new_body)", "header_list(self): return sorted([ent for k, ent in self.player.location.entities.items() if distance(self.player.position,", "UiFrame(urwid.Frame): def __init__(self, parent, mind, *args, **kargs): self.parent = parent", "self._menu_view @menu_view.setter def menu_view(self, value): self._menu_view = value _columns =", "urwid.Text(_marker, align=\"center\") inv_btns.append((5, urwid.LineBox(btn))) if self.mind.screen_size != (80, 24): inv_btns.append(urwid.Text(\"\\nSET", "+= [f\"{_name:<12} \", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] elif self.parent.parent.menu_width >", "_left += [f\"{_name:<6} \", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] else: _left", "- make button_left and button_right plain strings and variable width", "CHARACTERISTICS: c = getattr(player, s) state = [\"normal\", \"positive\", \"negative\"][-int(c.temp_bonus", "[f\" \"] if i < 9: _num = f\"\\n {i+1}", "def handle_input(self, _input): if _input == \"ctrl f\": self.debug_view =", "except: pass def focus_previous(self): try: self.focus_position -= 1 
except: pass", "item\\n\", f\"ctrl+p:respawn\\n\", f\"ctrl+a:inventory\\n\", f\"ctrl+s:status\\n\", f\"ctrl+d:help\\n\", f\"ctrl+e:equipment\\n\"] columns = urwid.Columns([urwid.Text(map_commands, wrap=\"clip\"),", "b not in _bonus: _bonus[b] = val else: _bonus[b] +=", "utf-8 import urwid import time, os, copy from rpg_game.utils import", "return self.mind.screen_size[1] - self.header_height - FOOTER_HEIGHT - 2 @property def", "f\"{'Enter:use':<14s}\")] _text += [(\"yellow\", \"Q:drop\")] self.contents[\"footer\"] = (urwid.Text(_text, align=\"center\"), None)", "+= \"\\n\" self.box[:] = [urwid.Text(_top)] + _equipment class HelpFrame(UiFrame): def", "(\"disabled\",\"dark red\",\"black\"), (\"red\",\"dark red\",\"black\"), (\"green\",\"light green\",\"black\"), (\"yellow\",\"yellow\",\"black\"), (\"brown\",\"brown\",\"black\"), (\"white_line\",\"black\",\"white\", \"standout\"),", "return urwid.Text(_text) def update_header(self): if not self.player.inventory.selection: self.contents[\"header\"] = (self.default_header,", "not self.player.inventory.selection: self.contents[\"header\"] = (self.default_header, None) else: i = self.player.inventory.selection", "== \"=\": _input = 11 self.player.inventory.selection = self.player.inventory.get(_input) def update_header(self):", "min_dmg, max_dmg = (number * 1, number * value) min_dmg", "_right.append((\"yellow\", f\"{player.inventory.encumbrance:>2d}\")) else: _right.append((\"white\", f\"{player.inventory.encumbrance:>2d}\")) _right.append(f\"/{player.encumbrance:<2d}\\n\") _right.append(f\"Speed {player.movement_speed}\\n\") _right.append(f\"Monsterized {player.MP:<2d}\\n\")", "start_game_frame(self): self.bodies[\"Game\"] = GameFrame(self, self.mind) self.update_body(\"Game\", no_title=True) class IntroFrame(UiFrame): def", "self.menu_view elif _input == \"enter\" and self.player.inventory.selection: self.player.use_quick_item(self.player.inventory.selection) self.update_footer() elif", "\" elif i == 11: _num = \"\\n = \"", "[(\"line\", _num)] else: _marker += [(\"top\", _num)] btn = urwid.Text(_marker,", "return sorted([ent for k, ent in self.player.location.entities.items() if distance(self.player.position, ent.position)", "pass def focus_previous(self): try: self.focus_position -= 1 except IndexError: pass", "urwid.Text(_text) def update_header(self): if not self.player.inventory.selection: self.contents[\"header\"] = (self.default_header, None)", "= SelectableListBox(walker) header = urwid.LineBox(urwid.BoxAdapter(self.listbox, len(self.choices)+1)) super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[\"Warrior\"])])), header=header,", "1 if self.focus_position < 0: self.focus_position += self.widget_size new_body =", "and self.player.inventory.selection: self.player.use_quick_item(self.player.inventory.selection) self.update_footer() elif _input == \"Q\" and self.player.inventory.selection:", "= (i.color, m) self.box[:] = [urwid.Columns([(width+2, urwid.Text(_marker_box)), self.selection_data], dividechars=1)] def", "in range(height): _marker_box += [\"║\"] for y in range(width): _marker_box", "{b : globals()[f\"{b}Frame\"](self, mind) for b in _frames} idx =", "f\"tab:open/close\\n\",f\"0/9-=:select item\\n\", f\"ctrl+p:respawn\\n\", f\"ctrl+a:inventory\\n\", f\"ctrl+s:status\\n\", f\"ctrl+d:help\\n\", f\"ctrl+e:equipment\\n\"] columns = urwid.Columns([urwid.Text(map_commands,", "value = weapon.dmg min_dmg, max_dmg = (number * 1, number", "self.mind.avatar.uuid in self.mind.connections: return 
self.mind.connections[self.mind.avatar.uuid] else: return None def handle_input(self,", "1 if obj.is_equipment and obj.is_equipped: _marker = [\"[\", (obj.color, f\"{obj.marker[0]}\"),", "+= [urwid.Text([f\"{_name}: \"])] _bonus = {} for eqp in player.equipment_set:", "-1 self.debug_view = False super().__init__(parent, mind, map_box) self.on_update() @property def", "map_box) self.on_update() @property def visible_range(self): header_height = self.parent.header_height + 2", "warrior\\n\\nStrength +1, Hit points +4\\nCharge and parry\", \"Dwarf\": \"The short", "@property def visible_range(self): header_height = self.parent.header_height + 2 tot_rows =", "user_args, borders=borders, disabled=disabled) return urwid.AttrMap(btn, attr_map, focus_map=focus_map) def create_button(label, cmd=None,", "v\": self.layer_view = self.layer_view + 1 if self.layer_view > 2:", "= self.player.location.layer_from_entities(self.layer_view, self.debug_view) x, y, z = self.player.position w =", "focus_next(self): try: self.focus_position += 1 except IndexError: pass def focus_previous(self):", "_text += [(\"green\", f\"{'Enter:equip':<14s}\")] elif i.is_equipped: _text += [(\"green\", f\"{'Enter:unequip':<14s}\")]", "else: _top += f\"{b}:{val} \" _top += \"\\n\" self.box[:] =", "self).__init__(cols) self.disabled = disabled if on_press: urwid.connect_signal(self, 'click', on_press, user_data)", "# urwid.AttrMap(self, None, \"line\") def selectable(self): return not self.disabled def", "self.update_footer() self.map.on_update() if self.menu_view: self.menu.on_update() def handle_input(self, _input): if _input", "= [] if not i.requisites(self.player): _text += [(\"red\", f\"{'Cannot equip':<14s}\")]", "_top = \"\" for b, val in _bonus.items(): if b", "number, value = weapon.dmg min_dmg, max_dmg = (number * 1,", "act in self.mind.key_map.items() if act.startswith(\"class_ability\")] for i, act in enumerate(self.player.class_actions):", "_top += f\"{b}:{val} \" _top += \"\\n\" self.box[:] = [urwid.Text(_top)]", "3 MAX_MENU_WIDTH = 48 FOOTER_HEIGHT = 4 PALETTE = [", "user_args = None, borders=True, disabled=False): btn = create_button(label, cmd=cmd, align", "from urwid import raw_display SIZE = lambda scr=raw_display.Screen(): scr.get_cols_rows() MIN_HEADER_HEIGHT", "-1 elif _input in self.mind.key_map: _action = self.mind.key_map[_input] self.player.handle_input(_action) class", "(80, 24): inv_btns.append(urwid.Text(\"\\nSET TERMINAL\\nTO 80X24\", align=\"center\")) self.contents[\"footer\"] = (SelectableColumns(inv_btns, dividechars=0),", "= [\"]\", (obj.color, f\"{obj.marker[0]}\"), \"[\"] elif obj.is_consumable: _marker = [\"(\",", "restart(self): self.update_body(\"Intro\", no_title=True) def start_game_frame(self): self.bodies[\"Game\"] = GameFrame(self, self.mind) self.update_body(\"Game\",", "= max(0, x - self.parent.body_height//2) if h+self.parent.body_height >= len(visible_map): visible_map", "len(self.choices)-1) choice = self.choices[index] self.mind.master.new_player(self.mind.avatar.uuid, choice) self.parent.start_game_frame() def update_description(self): index", "mighty warrior\\n\\nStrength +1, Hit points +4\\nCharge and parry\", \"Dwarf\": \"The", "(urwid.Columns(_columns, focus_column=1), None) @property def header_list(self): return sorted([ent for k,", "4) else: number, value = weapon.dmg min_dmg, max_dmg = (number", "[urwid.AttrMap(urwid.AttrMap(urwid.Text(line, wrap=\"clip\"), {self.player.id:\"player\"}), {p.id:\"other\" for i, p in self.mind.master.players.items()}) for", "= 
urwid.Text(_marker, align=\"center\") inv_btns.append((5, urwid.LineBox(btn))) if self.mind.screen_size != (80, 24):", "_input): if _input == \"tab\": self.menu_view = not self.menu_view elif", "({c.mod:<+2d})\\n\"] _right = [] base = player.STR.mod weapon = player.equipment[\"main_hand\"]", "= urwid.Columns([ ('fixed', len(self.button_left), urwid.Text(self.button_left)), self._label, ('fixed', len(self.button_right), urwid.Text(self.button_right))], dividechars=1)", "dividechars = 1) super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([columns]))) class SelectableListBox(urwid.ListBox): def __init__(self,", "(\"green_line\",\"light green\",\"white\", \"standout\"), (\"yellow_line\",\"yellow\",\"white\", \"standout\"), (\"cyan\",\"light cyan\",\"black\"), (\"cyan_line\",\"light cyan\",\"white\", \"standout\"),", "__init__(self, parent, mind): self.mind = mind _header = urwid.LineBox(urwid.BoxAdapter(SelectableListBox(urwid.SimpleFocusListWalker([urwid.Text(\"\")])), self.header_height))", "def __init__(self, body): super(SelectableListBox, self).__init__(body) def focus_next(self): try: self.focus_position +=", "unchanged ''' button_left = \"[\" button_right = \"]\" def __init__(self,", "title=_title)) def on_update(self): self.active_body.on_update() def selectable(self): return False def update_body(self,", "+= val _top = \"\" for b, val in _bonus.items():", "class StatusFrame(UiFrame): def __init__(self, parent, mind): box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.box", "if self.disabled: # urwid.AttrMap(self, \"disabled\") # else: # urwid.AttrMap(self, None,", "parry\", \"Dwarf\": \"The short dwarf\\n\\nStrength +1, Constitution +1, Hit points", "max_dmg = max(1, base + max_dmg) _right.append(f\"Damage {min_dmg:>3d}-{max_dmg:<3d}\\n\") _right.append(f\"Reduction {player.dmg_reduction:<3d}\\n\")", "sorted([ent for k, ent in self.player.location.entities.items() if distance(self.player.position, ent.position) <=", "= 3 MAX_MENU_WIDTH = 48 FOOTER_HEIGHT = 4 PALETTE =", "align=\"center\") self.default_footer = urwid.Text([(\"green\", f\"{'Enter:use/eqp':<14s}\"), (\"yellow\", \"Q:drop\")], align=\"center\") super().__init__(parent, mind,", "self.header_widget = self.header.original_widget.box_widget self.footer_content_size = 0 @property def header_height(self): return", "\"white\", \"standout\"), (\"reversed\", \"standout\", \"\"), (\"common\",\"white\",\"black\"), (\"common_line\",\"black\",\"white\",\"standout\"), (\"uncommon\",\"dark cyan\",\"black\"), (\"uncommon_line\",\"dark", "(\"set\",\"light green\",\"black\"), (\"set_line\",\"light green\",\"white\",\"standout\"), (\"normal\",\"white\",\"black\"), (\"positive\",\"light green\",\"black\"), (\"negative\",\"dark red\",\"black\"), (\"white\",\"white\",\"black\"),", "return (tot_rows - header_height - FOOTER_HEIGHT) def on_update(self): if self.layer_view", "= [b for b in self.parent.bodies][self.focus_position] self.parent.update_body(new_body) except: pass class", "green\", \"black\"), (\"other\", \"light blue\", \"black\"), (\"monster\", \"dark red\", \"black\"),", "if player.inventory.encumbrance == EXTRA_ENCUMBRANCE_MULTI*player.encumbrance: _right.append((\"red\", f\"{player.inventory.encumbrance:>2d}\")) elif player.inventory.encumbrance > player.encumbrance:", "elif player.inventory.encumbrance > player.encumbrance: _right.append((\"yellow\", f\"{player.inventory.encumbrance:>2d}\")) else: _right.append((\"white\", f\"{player.inventory.encumbrance:>2d}\")) 
_right.append(f\"/{player.encumbrance:<2d}\\n\")", "try: self.focus_position += 1 except IndexError: pass def focus_previous(self): try:", "= max(1, base + max_dmg) _right.append(f\"Damage {min_dmg:>3d}-{max_dmg:<3d}\\n\") _right.append(f\"Reduction {player.dmg_reduction:<3d}\\n\") _right.append(f\"Encumb", "\"modified\", self.update_description) self.listbox = SelectableListBox(walker) header = urwid.LineBox(urwid.BoxAdapter(self.listbox, len(self.choices)+1)) super().__init__(parent,", "pass def focus_previous(self): try: self.focus_position -= 1 if self.focus_position <", "self.lllavel = label # @property # def disabled(self): # return", "mind) super().__init__(parent, mind, urwid.Columns([(self.map_width, self.map), (self.menu_width, self.menu)], focus_column=1), header=_header, footer=None,", "box.body super().__init__(parent, mind, box) def on_update(self): player = self.player x,", "\"=\"): self.select_item(_input) self.update_footer() elif _input == self.mind.key_map[\"status-menu\"] and self.menu_view: self.menu.update_body(\"Status\")", "b in self.parent.bodies][self.focus_position] self.parent.update_body(new_body) except: pass def focus_previous(self): try: self.focus_position", "len(self.player.inventory.all): self.update_footer() if self.mind.screen_size != (80, 24): self.update_footer() self.map.on_update() if", "_marker = [f\" \"] if i < 9: _num =", "dividechars=0), None) self.footer_content_size = _size def on_update(self): self.update_header() if self.footer_content_size", "self.mind.screen_size[0] - self.menu_width return self.mind.screen_size[0] @property def body_width(self): return self.mind.screen_size[0]", "self.debug_view = False super().__init__(parent, mind, map_box) self.on_update() @property def visible_range(self):", "-= 1 except IndexError: pass class SelectableColumns(urwid.Columns): def __init__(self, widget_list,", "on_update(self): self.update_header() if self.footer_content_size != len(self.player.inventory.all): self.update_footer() if self.mind.screen_size !=", "import * from urwid import raw_display SIZE = lambda scr=raw_display.Screen():", "(obj.color, f\"{obj.marker[0]}\"), \"[\"] elif obj.is_consumable: _marker = [\"(\", (obj.color, f\"{obj.marker[0]}\"),", "parent, mind): map_box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.map_box = map_box.body self.layer_view =", "and summon\"} line = [] for c in self.choices: btn", "visible_map = visible_map[len(visible_map)-self.parent.body_height:] else: visible_map = visible_map[h:h+self.parent.body_height] map_with_attr = [urwid.AttrMap(urwid.AttrMap(urwid.Text(line,", "class FrameColumns(urwid.Columns): def __init__(self, parent, widget_list, dividechars=0): self.widget_size = len(widget_list)", "mind urwid.AttrMap(self,\"frame\") super().__init__(*args, **kargs) @property def player(self): if self.mind.avatar.uuid in", "= 48 FOOTER_HEIGHT = 4 PALETTE = [ (\"line\", 'black',", "self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=title), None) else: self.contents[\"body\"] = (self.active_body, None)", "= self.choices[index] self.mind.master.new_player(self.mind.avatar.uuid, choice) self.parent.start_game_frame() def update_description(self): index = min(self.listbox.focus_position,", "no_title=False, boxed=False): self.active_body = self.bodies[title] if boxed: if no_title: self.contents[\"body\"]", "*args, **kargs): self.parent = parent self.mind = mind urwid.AttrMap(self,\"frame\") super().__init__(*args,", "line in visible_map] self.map_box[:] = map_with_attr def 
handle_input(self, _input): if", "Dexterity +1, Intelligence +1, Hit points +2\\nSing and summon\"} line", "for x in range(height): _marker_box += [\"║\"] for y in", "and displayed - otherwise, we leave Button behaviour unchanged '''", "borders=True, disabled=False): self._label = ButtonLabel(\"\") if borders: cols = urwid.Columns([", "disabled=False): self._label = ButtonLabel(\"\") if borders: cols = urwid.Columns([ ('fixed',", "= disabled if on_press: urwid.connect_signal(self, 'click', on_press, user_data) self.set_label(label) self.lllavel", "_left += [f\"{s:<3} \", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] _right =", "self.mind.master.players.items()})) if widgets: self.header_widget.body[:] = widgets class MapFrame(UiFrame): def __init__(self,", ": globals()[f\"{b}Frame\"](self, mind) for b in _frames} idx = -1", "def body_width(self): return self.mind.screen_size[0] @property def body_height(self): return self.mind.screen_size[1] -", "summon\"} line = [] for c in self.choices: btn =", "urwid.LineBox(self.active_body, title=_title)) def on_update(self): self.active_body.on_update() def selectable(self): return False def", "except: pass def focus_previous(self): try: self.focus_position -= 1 if self.focus_position", "== self.mind.key_map[\"status-menu\"] and self.menu_view: self.menu.update_body(\"Status\") elif _input == self.mind.key_map[\"help-menu\"] and", "mind): self.bodies = {\"Intro\" : IntroFrame(self, mind)} self.active_body = self.bodies[\"Intro\"]", "= self.mind.key_map[_input] self.player.handle_input(_action) class MenuFrame(UiFrame): def __init__(self, parent, mind): _frames", "= [] for s in CHARACTERISTICS: c = getattr(player, s)", "= MapFrame(self, mind) self.menu = MenuFrame(self, mind) super().__init__(parent, mind, urwid.Columns([(self.map_width,", "\"\\n\" self.box[:] = [urwid.Text(_top)] + _equipment class HelpFrame(UiFrame): def __init__(self,", "_input == \"ctrl f\": self.debug_view = not self.debug_view elif _input", "self.player.actions[\"drop\"].use(self.player, obj=self.player.inventory.selection) self.update_footer() elif _input.isnumeric() or _input in (\"-\", \"=\"):", "f\"{'Enter:use/eqp':<14s}\"), (\"yellow\", \"Q:drop\")], align=\"center\") super().__init__(parent, mind, box, header=self.default_header, footer=self.default_footer) @property", "select_class(self, button): index = min(self.listbox.focus_position, len(self.choices)-1) choice = self.choices[index] self.mind.master.new_player(self.mind.avatar.uuid,", "elif _input == \"enter\" and self.player.inventory.selection: self.player.use_quick_item(self.player.inventory.selection) self.update_footer() elif _input", "_input == \"-\": _input = 10 elif _input == \"=\":", "widgets class MapFrame(UiFrame): def __init__(self, parent, mind): map_box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")]))", "(\"uncommon\",\"dark cyan\",\"black\"), (\"uncommon_line\",\"dark cyan\",\"white\",\"standout\"), (\"rare\",\"yellow\",\"black\"), (\"rare_line\",\"yellow\",\"white\",\"standout\"), (\"unique\",\"light magenta\",\"black\"), (\"unique_line\",\"light magenta\",\"white\",\"standout\"),", "h = max(0, x - self.parent.body_height//2) if h+self.parent.body_height >= len(visible_map):", "self._disabled == value: # return # if self.disabled: # urwid.AttrMap(self,", "def disabled(self, value): # if self._disabled == value: # return", "\" _top += \"\\n\" self.box[:] = [urwid.Text(_top)] + _equipment class", "elif _input == self.mind.key_map[\"inventory-menu\"] and self.menu_view: 
self.menu.update_body(\"Inventory\") else: self.map.handle_input(_input) def", "in self.parent.bodies][self.focus_position] self.parent.update_body(new_body) except: pass def focus_previous(self): try: self.focus_position -=", "pass class ButtonLabel(urwid.SelectableIcon): def set_text(self, label): ''' set_text is invoked", "f\"{player.inventory.encumbrance:>2d}\")) _right.append(f\"/{player.encumbrance:<2d}\\n\") _right.append(f\"Speed {player.movement_speed}\\n\") _right.append(f\"Monsterized {player.MP:<2d}\\n\") self.box[:] = [urwid.Text(_top), urwid.Columns([urwid.Text(_left),", "== 11: _num = \"\\n = \" if obj and", "f\"ctrl+s:status\\n\", f\"ctrl+d:help\\n\", f\"ctrl+e:equipment\\n\"] columns = urwid.Columns([urwid.Text(map_commands, wrap=\"clip\"), urwid.Text(menu_commands, wrap=\"clip\")], dividechars", "in self.mind.connections: return self.mind.connections[self.mind.avatar.uuid] else: return None def handle_input(self, _input):", "+6\\nDemolish and parry\", \"Wizard\": \"The opportune wizard\\n\\nIntelligence +1\\n Fireball, teleport", "title=title), None) else: self.contents[\"body\"] = (self.active_body, None) class GUI(UiFrame): def", "- \" elif i == 11: _num = \"\\n =", "i, act in enumerate(self.player.class_actions): k = class_action_keys[i] map_commands.append(f\"{k}:{self.player.class_actions[act].description.lower()}\\n\") menu_commands =", "getattr(player, s) state = [\"normal\", \"positive\", \"negative\"][-int(c.temp_bonus < 0) +", "9 elif _input == \"-\": _input = 10 elif _input", "if self.footer_content_size != len(self.player.inventory.all): self.update_footer() if self.mind.screen_size != (80, 24):", "urwid.Text(\"0/9-= to select\\n\\n\", align=\"center\") self.default_footer = urwid.Text([(\"green\", f\"{'Enter:use/eqp':<14s}\"), (\"yellow\", \"Q:drop\")],", "for b in set(list(eqp.bonus.keys()) + list(eqp.set_bonus.keys())): val = player.full_eqp_bonus(eqp, b)", "self.mind.key_map[\"equipment-menu\"] and self.menu_view: self.menu.update_body(\"Equipment\") elif _input == self.mind.key_map[\"inventory-menu\"] and self.menu_view:", "for c in self.choices: btn = attr_button(c, self.select_class) line.append(btn) walker", "if no_title: self.contents[\"body\"] = (urwid.LineBox(self.active_body), None) else: self.contents[\"body\"] = (urwid.LineBox(self.active_body,", "= mind urwid.AttrMap(self,\"frame\") super().__init__(*args, **kargs) @property def player(self): if self.mind.avatar.uuid", "obj in self.player.inventory.content.items(): if obj: _size += 1 if obj.is_equipment", "button_right plain strings and variable width - any string, including", "in self.player.location.entities.items() if distance(self.player.position, ent.position) <= 3 and ent.status], key=lambda", "None @property def connection(self): if self.mind.avatar.uuid in self.mind.connections: return self.mind.connections[self.mind.avatar.uuid]", "= self.bodies[_title] super().__init__(parent, mind, urwid.LineBox(self.active_body, title=_title)) def on_update(self): self.active_body.on_update() def", "\"]\"] elif obj.is_equipment and not obj.is_equipped: _marker = [\"]\", (obj.color,", "self.update_footer() elif _input.isnumeric() or _input in (\"-\", \"=\"): self.select_item(_input) self.update_footer()", "urwid.Columns([ ('fixed', len(self.button_left), urwid.Text(self.button_left)), self._label, ('fixed', len(self.button_right), urwid.Text(self.button_right))], dividechars=1) else:", "button_right = \"]\" def __init__(self, label, on_press=None, user_data=None, borders=True, disabled=False):", 
"_input): # print(\"HANDLING\", _input) self.active_body.handle_input(_input) # def exit(self): # self.disconnect()", "[] for s in CHARACTERISTICS: c = getattr(player, s) state", "return None @property def connection(self): if self.mind.avatar.uuid in self.mind.connections: return", "''' - override __init__ to use our ButtonLabel instead of", "urwid.Text(\"║\") width = 8 height = 6 _marker_box = [\"╔\"", "f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] elif self.parent.parent.menu_width > 36: _name = c.name[0].upper()", "cmd, user_args = user_args) else: urwid.connect_signal(btn, \"click\", cmd) return btn", "_right.append(f\"/{player.encumbrance:<2d}\\n\") _right.append(f\"Speed {player.movement_speed}\\n\") _right.append(f\"Monsterized {player.MP:<2d}\\n\") self.box[:] = [urwid.Text(_top), urwid.Columns([urwid.Text(_left), urwid.Text(_right)],", "def focus_previous(self): pass def update_body(self, title, no_title=False, boxed=False): self.active_body =", "= max(1, base + min_dmg) max_dmg = max(1, base +", "self.active_body) def on_update(self): self.active_body.on_update() def handle_input(self, _input): # print(\"HANDLING\", _input)", "val in _bonus.items(): if b == \"dmg_reduction\": _top += f\"Reduction:{val}", "_bonus[b] = val else: _bonus[b] += val _top = \"\"", "return MIN_HEADER_HEIGHT#max(MIN_HEADER_HEIGHT, self.mind.screen_size[1]//8) @property def menu_width(self): if self.menu_view: return min(MAX_MENU_WIDTH,", "= visible_map[len(visible_map)-self.parent.body_height:] else: visible_map = visible_map[h:h+self.parent.body_height] map_with_attr = [urwid.AttrMap(urwid.AttrMap(urwid.Text(line, wrap=\"clip\"),", "no_title=True) class IntroFrame(UiFrame): def __init__(self, parent, mind): # urwid.Padding(urwid.BigText(('top', \"Hack\\'n\\'SSH\"),", "!= len(self.player.inventory.all): self.update_footer() if self.mind.screen_size != (80, 24): self.update_footer() self.map.on_update()", "= [\"╔\" +\"═\"*width+\"╗\\n\"] for x in range(height): _marker_box += [\"║\"]", "def handle_input(self, _input): pass def on_update(self): pass def dispatch_event(self, event_type,", "def update_body(self): side = urwid.Text(\"║\") width = 8 height =", "super().__init__(parent, mind, map_box) self.on_update() @property def visible_range(self): header_height = self.parent.header_height", "\"standout\"), (\"red_line\",\"dark red\",\"white\", \"standout\"), (\"green_line\",\"light green\",\"white\", \"standout\"), (\"yellow_line\",\"yellow\",\"white\", \"standout\"), (\"cyan\",\"light", "'white', \"standout\"), (\"top\",\"white\",\"black\"), (\"frame\",\"white\",\"white\"), (\"player\", \"light green\", \"black\"), (\"other\", \"light", "else: return None @property def connection(self): if self.mind.avatar.uuid in self.mind.connections:", "[f\"{_name:<12} \", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] elif self.parent.parent.menu_width > 36:", "class EquipmentFrame(UiFrame): def __init__(self, parent, mind): box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.box", "len(widget_list) super(FrameColumns, self).__init__(widget_list, dividechars) self.parent = parent def focus_next(self): try:", "MapFrame(UiFrame): def __init__(self, parent, mind): map_box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.map_box =", "'click', on_press, user_data) self.set_label(label) self.lllavel = label # @property #", "def update_footer(self): _size = 0 inv_btns = [] for i,", "= True self.update_footer() self.header_widget = self.header.original_widget.box_widget 
self.footer_content_size = 0 @property", "Hit points +2\\nSing and summon\"} line = [] for c", "== 9: _num = \"\\n 0 \" elif i ==", "= [\"Menu commands\\n\\n\", f\"tab:open/close\\n\",f\"0/9-=:select item\\n\", f\"ctrl+p:respawn\\n\", f\"ctrl+a:inventory\\n\", f\"ctrl+s:status\\n\", f\"ctrl+d:help\\n\", f\"ctrl+e:equipment\\n\"]", "== \"0\": s_input = 9 elif _input == \"-\": _input", "# print(\"HANDLING\", _input) self.active_body.handle_input(_input) # def exit(self): # self.disconnect() #", "if self.menu_view: return min(MAX_MENU_WIDTH, (3*self.mind.screen_size[0])//7) return 0 @property def map_width(self):", "('fixed', len(self.button_left), urwid.Text(self.button_left)), self._label, ('fixed', len(self.button_right), urwid.Text(self.button_right))], dividechars=1) else: cols", "(urwid.LineBox(self.active_body, title=_title), None) class InventoryFrame(UiFrame): def __init__(self, parent, mind): columns", "(\"monster\", \"dark red\", \"black\"), (\"fatigued\", \"dark red\", \"white\", \"standout\"), (\"reversed\",", "list(eqp.set_bonus.keys())): val = player.full_eqp_bonus(eqp, b) if b not in _bonus:", "+= 1 except IndexError: pass def focus_previous(self): try: self.focus_position -=", "header=self.default_header, footer=self.default_footer) @property def selection_data(self): if not self.player.inventory.selection: return urwid.Text(\"\")", "= self.player.position w = max(0, y - self.parent.body_width//3) visible_map =", "\"Q\" and self.player.inventory.selection: self.player.actions[\"drop\"].use(self.player, obj=self.player.inventory.selection) self.update_footer() elif _input.isnumeric() or _input", "self.contents[\"body\"] = (urwid.LineBox(self.active_body), None) else: self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=title), None)", "[urwid.Text([f\"{_name}: \", (obj.color, f\"{obj.name}\")])] else: _equipment += [urwid.Text([f\"{_name}: \"])] _bonus", "i.is_consumable: _text += [(\"green\", f\"{'Enter:use':<14s}\")] _text += [(\"yellow\", \"Q:drop\")] self.contents[\"footer\"]", "copy.deepcopy(self.player.location.map) else: _map = self.player.location.layer_from_entities(self.layer_view, self.debug_view) x, y, z =", "urwid.Columns([urwid.Text(_left), urwid.Text(_right)], dividechars = 1) ] class EquipmentFrame(UiFrame): def __init__(self,", "10: _num = \"\\n - \" elif i == 11:", "_marker_box[(x+X_OFFSET)*(width+2)+y+Y_OFFSET] = (i.color, m) self.box[:] = [urwid.Columns([(width+2, urwid.Text(_marker_box)), self.selection_data], dividechars=1)]", "-= 1 if self.focus_position < 0: self.focus_position += self.widget_size new_body", "if distance(self.player.position, ent.position) <= 3 and ent.status], key=lambda ent: distance(self.player.position,", "(\"white_line\",\"black\",\"white\", \"standout\"), (\"red_line\",\"dark red\",\"white\", \"standout\"), (\"green_line\",\"light green\",\"white\", \"standout\"), (\"yellow_line\",\"yellow\",\"white\", \"standout\"),", "in enumerate(self.player.class_actions): k = class_action_keys[i] map_commands.append(f\"{k}:{self.player.class_actions[act].description.lower()}\\n\") menu_commands = [\"Menu commands\\n\\n\",", "not i.is_equipped: _text += [(\"green\", f\"{'Enter:equip':<14s}\")] elif i.is_equipped: _text +=", "\" \") _name = _name[0].upper() + _name[1:] if obj: _equipment", "> player.encumbrance: _right.append((\"yellow\", f\"{player.inventory.encumbrance:>2d}\")) else: _right.append((\"white\", f\"{player.inventory.encumbrance:>2d}\")) _right.append(f\"/{player.encumbrance:<2d}\\n\") _right.append(f\"Speed 
{player.movement_speed}\\n\")", "= [\"(\", (obj.color, f\"{obj.marker[0]}\"), \")\"] else: _marker = [f\" {obj.marker[0]}", "= (urwid.Text(_text, align=\"center\"), None) def update_body(self): side = urwid.Text(\"║\") width", "urwid.AttrMap(btn, attr_map, focus_map=focus_map) def create_button(label, cmd=None, align = \"center\", user_args", "align = align, user_args = user_args, borders=borders, disabled=disabled) return urwid.AttrMap(btn,", "= visible_map[h:h+self.parent.body_height] map_with_attr = [urwid.AttrMap(urwid.AttrMap(urwid.Text(line, wrap=\"clip\"), {self.player.id:\"player\"}), {p.id:\"other\" for i,", "= 8 height = 6 _marker_box = [\"╔\" +\"═\"*width+\"╗\\n\"] for", "use our ButtonLabel instead of urwid.SelectableIcon - make button_left and", "\"tab\": self.menu_view = not self.menu_view elif _input == \"enter\" and", "dividechars=1)] def on_update(self): self.update_header() self.update_body() self.update_footer() class StatusFrame(UiFrame): def __init__(self,", "player.full_eqp_bonus(eqp, b) if b not in _bonus: _bonus[b] = val", "+= [(\"green\", f\"{'Enter:use':<14s}\")] _text += [(\"yellow\", \"Q:drop\")] self.contents[\"footer\"] = (urwid.Text(_text,", "header_height(self): return MIN_HEADER_HEIGHT#max(MIN_HEADER_HEIGHT, self.mind.screen_size[1]//8) @property def menu_width(self): if self.menu_view: return", "green\",\"black\"), (\"negative\",\"dark red\",\"black\"), (\"white\",\"white\",\"black\"), (\"disabled\",\"dark red\",\"black\"), (\"red\",\"dark red\",\"black\"), (\"green\",\"light green\",\"black\"),", "def __init__(self, parent, mind, *args, **kargs): self.parent = parent self.mind", "False def update_body(self, _title): self.active_body = self.bodies[_title] self.contents[\"body\"] = (urwid.LineBox(self.active_body,", "try: self.focus_position -= 1 if self.focus_position < 0: self.focus_position +=", "import raw_display SIZE = lambda scr=raw_display.Screen(): scr.get_cols_rows() MIN_HEADER_HEIGHT = 3", "= urwid.Columns([urwid.Text(\"\")]) box = urwid.ListBox(urwid.SimpleListWalker([columns])) self.box = box.body self.default_header =", "(obj.color, f\"{obj.marker[0]}\"), \"]\"] elif obj.is_equipment and not obj.is_equipped: _marker =", "self.widget_size = len(widget_list) super(FrameColumns, self).__init__(widget_list, dividechars) self.parent = parent def", "on_update(self): self.active_body.on_update() def selectable(self): return False def update_body(self, _title): self.active_body", "min_dmg) max_dmg = max(1, base + max_dmg) _right.append(f\"Damage {min_dmg:>3d}-{max_dmg:<3d}\\n\") _right.append(f\"Reduction", "__init__(self, parent, mind): # urwid.Padding(urwid.BigText(('top', \"Hack\\'n\\'SSH\"), urwid.HalfBlock5x4Font())), self.choices = (\"Warrior\",", "self.active_body.on_update() def selectable(self): return False def update_body(self, _title): self.active_body =", "def menu_view(self, value): self._menu_view = value _columns = [(self.map_width, self.map),", "- self.parent.body_height//2) if h+self.parent.body_height >= len(visible_map): visible_map = visible_map[len(visible_map)-self.parent.body_height:] else:", "import urwid import time, os, copy from rpg_game.utils import log,", "btn = attr_button(c, self.select_class) line.append(btn) walker = urwid.SimpleFocusListWalker(line) urwid.connect_signal(walker, \"modified\",", "= mind map_commands = [\"Map commands\\n\\n\", f\"←→↑↓:move\\n\", f\"shift+←→↑↓:dash\\n\", f\"a:attack\\n\", f\"q:pickup\\n\"]", "self.mind.key_map[_input] self.player.handle_input(_action) class MenuFrame(UiFrame): def 
__init__(self, parent, mind): _frames =", "if b not in _bonus: _bonus[b] = val else: _bonus[b]", "mind) for b in _frames} idx = -1 _title =", "\"=\": _input = 11 self.player.inventory.selection = self.player.inventory.get(_input) def update_header(self): widgets", "else: _bonus[b] += val _top = \"\" for b, val", "if on_press: urwid.connect_signal(self, 'click', on_press, user_data) self.set_label(label) self.lllavel = label", "= not self.debug_view elif _input == \"ctrl v\": self.layer_view =", "menu_commands = [\"Menu commands\\n\\n\", f\"tab:open/close\\n\",f\"0/9-=:select item\\n\", f\"ctrl+p:respawn\\n\", f\"ctrl+a:inventory\\n\", f\"ctrl+s:status\\n\", f\"ctrl+d:help\\n\",", "return self._menu_view @menu_view.setter def menu_view(self, value): self._menu_view = value _columns", "_marker_box += [\"║\"] for y in range(width): _marker_box += [\".\"]", "(\"green\",\"light green\",\"black\"), (\"yellow\",\"yellow\",\"black\"), (\"brown\",\"brown\",\"black\"), (\"white_line\",\"black\",\"white\", \"standout\"), (\"red_line\",\"dark red\",\"white\", \"standout\"), (\"green_line\",\"light", "+1, Hit points +6\\nDemolish and parry\", \"Wizard\": \"The opportune wizard\\n\\nIntelligence", "urwid.ListBox(urwid.SimpleListWalker([columns])) self.box = box.body self.default_header = urwid.Text(\"0/9-= to select\\n\\n\", align=\"center\")", "self.map.on_update() if self.menu_view: self.menu.on_update() def handle_input(self, _input): if _input ==", "self.mind.key_map[\"help-menu\"] and self.menu_view: self.menu.update_body(\"Help\") elif _input == self.mind.key_map[\"equipment-menu\"] and self.menu_view:", "self.select_class) line.append(btn) walker = urwid.SimpleFocusListWalker(line) urwid.connect_signal(walker, \"modified\", self.update_description) self.listbox =", "GameFrame(UiFrame): def __init__(self, parent, mind): self.mind = mind _header =", "self.layer_view == -1: _map = copy.deepcopy(self.player.location.map) else: _map = self.player.location.layer_from_entities(self.layer_view,", "pos _marker_box[(x+X_OFFSET)*(width+2)+y+Y_OFFSET] = (i.color, m) self.box[:] = [urwid.Columns([(width+2, urwid.Text(_marker_box)), self.selection_data],", "self.menu_view: self.menu.on_update() def handle_input(self, _input): if _input == \"tab\": self.menu_view", "= [] for p in self.header_list: widgets.append(urwid.AttrMap(urwid.AttrMap(urwid.Text(p.status, wrap=\"clip\"), {self.player.id:\"player\"}), {p.id:\"other\"", "i.in_inventory_marker_positions): x, y = pos _marker_box[(x+X_OFFSET)*(width+2)+y+Y_OFFSET] = (i.color, m) self.box[:]", "super().__init__(parent, mind, self.active_body) def on_update(self): self.active_body.on_update() def handle_input(self, _input): #", "self.menu_width return self.mind.screen_size[0] @property def body_width(self): return self.mind.screen_size[0] @property def", "_input == \"ctrl v\": self.layer_view = self.layer_view + 1 if", "None) class InventoryFrame(UiFrame): def __init__(self, parent, mind): columns = urwid.Columns([urwid.Text(\"\")])", "{obj.marker[0]} \"] else: _marker = [f\" \"] if i <", "selection_data(self): if not self.player.inventory.selection: return urwid.Text(\"\") i = self.player.inventory.selection _text", "for i, p in self.mind.master.players.items()})) if widgets: self.header_widget.body[:] = widgets", "if boxed: if no_title: self.contents[\"body\"] = (urwid.LineBox(self.active_body), None) else: self.contents[\"body\"]", "focus_column) def focus_next(self): try: self.focus_position += 1 except: pass def", "self.contents[\"header\"] = 
(urwid.Text([(i.color, f\"{i.name}\\n\"), f\"{i.description}\\n\"], align=\"center\"), None) def update_footer(self): if", "= [\"Map commands\\n\\n\", f\"←→↑↓:move\\n\", f\"shift+←→↑↓:dash\\n\", f\"a:attack\\n\", f\"q:pickup\\n\"] class_action_keys = [k", "in self.mind.key_map: _action = self.mind.key_map[_input] self.player.handle_input(_action) class MenuFrame(UiFrame): def __init__(self,", "on_update(self): self.active_body.on_update() def handle_input(self, _input): # print(\"HANDLING\", _input) self.active_body.handle_input(_input) #", "< 0: self.focus_position += self.widget_size new_body = [b for b", "def restart(self): pass def focus_next(self): pass def focus_previous(self): pass def", "# if self.disabled: # urwid.AttrMap(self, \"disabled\") # else: # urwid.AttrMap(self,", "f\"{player.inventory.encumbrance:>2d}\")) else: _right.append((\"white\", f\"{player.inventory.encumbrance:>2d}\")) _right.append(f\"/{player.encumbrance:<2d}\\n\") _right.append(f\"Speed {player.movement_speed}\\n\") _right.append(f\"Monsterized {player.MP:<2d}\\n\") self.box[:]", "= lambda scr=raw_display.Screen(): scr.get_cols_rows() MIN_HEADER_HEIGHT = 3 MAX_MENU_WIDTH = 48", "+= [(\"green\", f\"{'Enter:equip':<14s}\")] elif i.is_equipped: _text += [(\"green\", f\"{'Enter:unequip':<14s}\")] elif", "if i < 9: _num = f\"\\n {i+1} \" elif", "_top = f\"{player.name:<12s} {player.game_class.name:<10s}\\nLev:{player.level:<2d} Exp:{player.exp:<4d} {player.location.name}@({x},{y})\\n\" _left = [] for", "] class UiFrame(urwid.Frame): def __init__(self, parent, mind, *args, **kargs): self.parent", "c.name[0].upper() + c.name[1:6] _left += [f\"{_name:<6} \", (state, f\"{c.value:>2d}\"), f\"", "+2\\nSing and summon\"} line = [] for c in self.choices:", "can be set and displayed - otherwise, we leave Button", "for m, pos in zip(i.in_inventory_markers, i.in_inventory_marker_positions): x, y = pos", "box) def on_update(self): player = self.player x, y, z =", "min(self.listbox.focus_position, len(self.choices)-1) choice = self.choices[index] self.contents[\"body\"] = (urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])), None) class", "wrap=\"clip\"), {self.player.id:\"player\"}), {p.id:\"other\" for i, p in self.mind.master.players.items()}) for line", "align=\"center\"), None) def update_body(self): side = urwid.Text(\"║\") width = 8", "None, \"line\") def selectable(self): return not self.disabled def attr_button(label, cmd=None,", "class MapFrame(UiFrame): def __init__(self, parent, mind): map_box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.map_box", "os, copy from rpg_game.utils import log, mod, distance from rpg_game.constants", "= \"]\" def __init__(self, label, on_press=None, user_data=None, borders=True, disabled=False): self._label", "= 2 Y_OFFSET = 4 for m, pos in zip(i.in_inventory_markers,", "= -1 _title = _frames[idx] self.active_body = self.bodies[_title] super().__init__(parent, mind,", "SIZE = lambda scr=raw_display.Screen(): scr.get_cols_rows() MIN_HEADER_HEIGHT = 3 MAX_MENU_WIDTH =", "align = \"center\", user_args = None, borders=True, disabled=False): btn =", "(\"name\",\"white\",\"black\"), ] class UiFrame(urwid.Frame): def __init__(self, parent, mind, *args, **kargs):", "i, obj in self.player.inventory.content.items(): if obj: _size += 1 if", "red\",\"black\"), (\"white\",\"white\",\"black\"), (\"disabled\",\"dark red\",\"black\"), (\"red\",\"dark red\",\"black\"), (\"green\",\"light green\",\"black\"), (\"yellow\",\"yellow\",\"black\"), 
(\"brown\",\"brown\",\"black\"),", "+= [i.eq_description, f\"\\nEncumbrance:{i.encumbrance}\\n\"] return urwid.Text(_text) def update_header(self): if not self.player.inventory.selection:", "number * value) min_dmg = max(1, base + min_dmg) max_dmg", "_input == self.mind.key_map[\"equipment-menu\"] and self.menu_view: self.menu.update_body(\"Equipment\") elif _input == self.mind.key_map[\"inventory-menu\"]", "= _name[0].upper() + _name[1:] if obj: _equipment += [urwid.Text([f\"{_name}: \",", "= 1) ] class EquipmentFrame(UiFrame): def __init__(self, parent, mind): box", "Hit points +2\\nSneak attack, hide and trap\", \"Bard\": \"The noisy", "{\"Intro\" : IntroFrame(self, mind)} self.active_body = self.bodies[\"Intro\"] super().__init__(parent, mind, self.active_body)", "= \"center\", user_args = None, borders=True, disabled=False): btn = create_button(label,", "elif _input == \"-\": _input = 10 elif _input ==", "commands\\n\\n\", f\"tab:open/close\\n\",f\"0/9-=:select item\\n\", f\"ctrl+p:respawn\\n\", f\"ctrl+a:inventory\\n\", f\"ctrl+s:status\\n\", f\"ctrl+d:help\\n\", f\"ctrl+e:equipment\\n\"] columns =", "(\"cyan_line\",\"light cyan\",\"white\", \"standout\"), (\"name\",\"white\",\"black\"), ] class UiFrame(urwid.Frame): def __init__(self, parent,", "f\"{'Enter:equip':<14s}\")] elif i.is_equipped: _text += [(\"green\", f\"{'Enter:unequip':<14s}\")] elif i.is_consumable: _text", "__init__(self, parent, mind): map_box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.map_box = map_box.body self.layer_view", "(state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] elif self.parent.parent.menu_width > 36: _name =", "cols = urwid.Columns([self._label], dividechars=0) super(urwid.Button, self).__init__(cols) self.disabled = disabled if", "+1\\n Fireball, teleport and ice wall\", \"Thief\": \"The sneaky thief\\n\\nDexterity", "_name = c.name[0].upper() + c.name[1:6] _left += [f\"{_name:<6} \", (state,", "pass def update_body(self, title, no_title=False, boxed=False): self.active_body = self.bodies[title] if", "= urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.box = box.body super().__init__(parent, mind, box) def on_update(self):", "if _input.isnumeric() and int(_input) > 0: _input = int(_input)-1 elif", "# def disabled(self, value): # if self._disabled == value: #", "def disconnect(self): pass def restart(self): pass def focus_next(self): pass def", "player.equipment[\"main_hand\"] if not weapon: min_dmg, max_dmg = (1, 4) else:", "def __init__(self, parent, mind): self.mind = mind map_commands = [\"Map", "borders: cols = urwid.Columns([ ('fixed', len(self.button_left), urwid.Text(self.button_left)), self._label, ('fixed', len(self.button_right),", "displayed - otherwise, we leave Button behaviour unchanged ''' button_left", "def focus_next(self): try: self.focus_position += 1 except IndexError: pass def", "(\"normal\",\"white\",\"black\"), (\"positive\",\"light green\",\"black\"), (\"negative\",\"dark red\",\"black\"), (\"white\",\"white\",\"black\"), (\"disabled\",\"dark red\",\"black\"), (\"red\",\"dark red\",\"black\"),", "= self.parent.header_height + 2 tot_rows = self.mind.screen_size[1] return (tot_rows -", "= urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.map_box = map_box.body self.layer_view = -1 self.debug_view =", "def selectable(self): return not self.disabled def attr_button(label, cmd=None, attr_map=None, focus_map", "else: return None def handle_input(self, _input): pass def on_update(self): pass", "Hit points +6\\nDemolish and 
parry\", \"Wizard\": \"The opportune wizard\\n\\nIntelligence +1\\n", "not self.debug_view elif _input == \"ctrl v\": self.layer_view = self.layer_view", "(\"yellow_line\",\"yellow\",\"white\", \"standout\"), (\"cyan\",\"light cyan\",\"black\"), (\"cyan_line\",\"light cyan\",\"white\", \"standout\"), (\"name\",\"white\",\"black\"), ] class", "f\"Reduction:{val} \" else: _top += f\"{b}:{val} \" _top += \"\\n\"", "try: self.focus_position -= 1 except IndexError: pass class SelectableColumns(urwid.Columns): def", "update_footer(self): if not self.player.inventory.selection: self.contents[\"footer\"] = (self.default_footer, None) else: i", "and obj is self.player.inventory.selection: _marker += [(\"line\", _num)] else: _marker", "8 height = 6 _marker_box = [\"╔\" +\"═\"*width+\"╗\\n\"] for x", "(obj.color, f\"{obj.name}\")])] else: _equipment += [urwid.Text([f\"{_name}: \"])] _bonus = {}", "print(\"HANDLING\", _input) self.active_body.handle_input(_input) # def exit(self): # self.disconnect() # self.mind.disconnect()#should", ">= self.widget_size: self.focus_position -= self.widget_size new_body = [b for b", "line = [] for c in self.choices: btn = attr_button(c,", "(\"frame\",\"white\",\"white\"), (\"player\", \"light green\", \"black\"), (\"other\", \"light blue\", \"black\"), (\"monster\",", "None) def update_footer(self): if not self.player.inventory.selection: self.contents[\"footer\"] = (self.default_footer, None)", "urwid.SelectableIcon - make button_left and button_right plain strings and variable", "eqp in player.equipment_set: for b in set(list(eqp.bonus.keys()) + list(eqp.set_bonus.keys())): val", "set_text(self, label): ''' set_text is invoked by Button.set_label ''' self.__super.set_text(label)", "urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.map_box = map_box.body self.layer_view = -1 self.debug_view = False", "(\"rare_line\",\"yellow\",\"white\",\"standout\"), (\"unique\",\"light magenta\",\"black\"), (\"unique_line\",\"light magenta\",\"white\",\"standout\"), (\"set\",\"light green\",\"black\"), (\"set_line\",\"light green\",\"white\",\"standout\"), (\"normal\",\"white\",\"black\"),", "x - self.parent.body_height//2) if h+self.parent.body_height >= len(visible_map): visible_map = visible_map[len(visible_map)-self.parent.body_height:]", "weapon.dmg min_dmg, max_dmg = (number * 1, number * value)", "- FOOTER_HEIGHT) def on_update(self): if self.layer_view == -1: _map =", "i, p in self.mind.master.players.items()}) for line in visible_map] self.map_box[:] =", "map_box.body self.layer_view = -1 self.debug_view = False super().__init__(parent, mind, map_box)", "= box.body super().__init__(parent, mind, box) def on_update(self): player = self.player", "log, mod, distance from rpg_game.constants import * from urwid import", "= \"line\", align = \"center\", user_args = None, borders=True, disabled=False):", "button_left and button_right plain strings and variable width - any", "header=_header, footer=None, focus_part=\"body\") self.menu_view = True self.update_footer() self.header_widget = self.header.original_widget.box_widget", "def update_header(self): widgets = [] for p in self.header_list: widgets.append(urwid.AttrMap(urwid.AttrMap(urwid.Text(p.status,", "self.header.original_widget.box_widget self.footer_content_size = 0 @property def header_height(self): return MIN_HEADER_HEIGHT#max(MIN_HEADER_HEIGHT, self.mind.screen_size[1]//8)", "lambda scr=raw_display.Screen(): scr.get_cols_rows() MIN_HEADER_HEIGHT = 3 MAX_MENU_WIDTH = 48 FOOTER_HEIGHT", "p in 
self.mind.master.players.items()}) for line in visible_map] self.map_box[:] = map_with_attr", "GameFrame(self, self.mind) self.update_body(\"Game\", no_title=True) class IntroFrame(UiFrame): def __init__(self, parent, mind):", "in _bonus: _bonus[b] = val else: _bonus[b] += val _top", "elif obj.is_consumable: _marker = [\"(\", (obj.color, f\"{obj.marker[0]}\"), \")\"] else: _marker", "focus_next(self): pass def focus_previous(self): pass def update_body(self, title, no_title=False, boxed=False):", "if self.mind.screen_size != (80, 24): inv_btns.append(urwid.Text(\"\\nSET TERMINAL\\nTO 80X24\", align=\"center\")) self.contents[\"footer\"]", "z = player.position _top = f\"{player.name:<12s} {player.game_class.name:<10s}\\nLev:{player.level:<2d} Exp:{player.exp:<4d} {player.location.name}@({x},{y})\\n\" _left", "self.layer_view = -1 self.debug_view = False super().__init__(parent, mind, map_box) self.on_update()", "== \"ctrl v\": self.layer_view = self.layer_view + 1 if self.layer_view", "_right.append(f\"Damage {min_dmg:>3d}-{max_dmg:<3d}\\n\") _right.append(f\"Reduction {player.dmg_reduction:<3d}\\n\") _right.append(f\"Encumb \") if player.inventory.encumbrance == EXTRA_ENCUMBRANCE_MULTI*player.encumbrance:", "key=lambda ent: distance(self.player.position, ent.position)) def update_footer(self): _size = 0 inv_btns", "PALETTE = [ (\"line\", 'black', 'white', \"standout\"), (\"top\",\"white\",\"black\"), (\"frame\",\"white\",\"white\"), (\"player\",", "super(FrameColumns, self).__init__(widget_list, dividechars) self.parent = parent def focus_next(self): try: self.focus_position", "SelectableListBox(urwid.ListBox): def __init__(self, body): super(SelectableListBox, self).__init__(body) def focus_next(self): try: self.focus_position", "points +2\\nSing and summon\"} line = [] for c in", "self.bodies = {\"Intro\" : IntroFrame(self, mind)} self.active_body = self.bodies[\"Intro\"] super().__init__(parent,", "**kargs) @property def player(self): if self.mind.avatar.uuid in self.mind.master.players: return self.mind.master.players[self.mind.avatar.uuid]", "IntroFrame(UiFrame): def __init__(self, parent, mind): # urwid.Padding(urwid.BigText(('top', \"Hack\\'n\\'SSH\"), urwid.HalfBlock5x4Font())), self.choices", "for i, p in self.mind.master.players.items()}) for line in visible_map] self.map_box[:]", "self.disabled = disabled if on_press: urwid.connect_signal(self, 'click', on_press, user_data) self.set_label(label)", "''' self.__super.set_text(label) self._cursor_position = len(label) + 1 class MyButton(urwid.Button): '''", "\"Q:drop\")], align=\"center\") super().__init__(parent, mind, box, header=self.default_header, footer=self.default_footer) @property def selection_data(self):", "(\"line\", 'black', 'white', \"standout\"), (\"top\",\"white\",\"black\"), (\"frame\",\"white\",\"white\"), (\"player\", \"light green\", \"black\"),", "if self.mind.avatar.uuid in self.mind.connections: return self.mind.connections[self.mind.avatar.uuid] else: return None def", "to select\\n\\n\", align=\"center\") self.default_footer = urwid.Text([(\"green\", f\"{'Enter:use/eqp':<14s}\"), (\"yellow\", \"Q:drop\")], align=\"center\")", "1 except: pass class FrameColumns(urwid.Columns): def __init__(self, parent, widget_list, dividechars=0):", "self.active_body = self.bodies[title] if boxed: if no_title: self.contents[\"body\"] = (urwid.LineBox(self.active_body),", "def on_update(self): self.active_body.on_update() def handle_input(self, _input): # print(\"HANDLING\", _input) self.active_body.handle_input(_input)", 
"rpg_game.constants import * from urwid import raw_display SIZE = lambda", "+\"═\"*width+\"╗\\n\"] for x in range(height): _marker_box += [\"║\"] for y", "self.header_widget.body[:] = widgets class MapFrame(UiFrame): def __init__(self, parent, mind): map_box", "[] _text += [i.eq_description, f\"\\nEncumbrance:{i.encumbrance}\\n\"] return urwid.Text(_text) def update_header(self): if", "self.update_body(\"Game\", no_title=True) class IntroFrame(UiFrame): def __init__(self, parent, mind): # urwid.Padding(urwid.BigText(('top',", "in _map] h = max(0, x - self.parent.body_height//2) if h+self.parent.body_height", "pass def focus_previous(self): try: self.focus_position -= 1 except: pass class", "and self.menu_view: self.menu.update_body(\"Status\") elif _input == self.mind.key_map[\"help-menu\"] and self.menu_view: self.menu.update_body(\"Help\")", "[\"(\", (obj.color, f\"{obj.marker[0]}\"), \")\"] else: _marker = [f\" {obj.marker[0]} \"]", "mind): columns = urwid.Columns([urwid.Text(\"\")]) box = urwid.ListBox(urwid.SimpleListWalker([columns])) self.box = box.body", "\"ctrl f\": self.debug_view = not self.debug_view elif _input == \"ctrl", "of urwid.SelectableIcon - make button_left and button_right plain strings and", "self.player.inventory.selection _text = [] if not i.requisites(self.player): _text += [(\"red\",", "player.inventory.encumbrance > player.encumbrance: _right.append((\"yellow\", f\"{player.inventory.encumbrance:>2d}\")) else: _right.append((\"white\", f\"{player.inventory.encumbrance:>2d}\")) _right.append(f\"/{player.encumbrance:<2d}\\n\") _right.append(f\"Speed", "= urwid.LineBox(urwid.BoxAdapter(self.listbox, len(self.choices)+1)) super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[\"Warrior\"])])), header=header, focus_part=\"header\") def select_class(self,", "urwid.ListBox(urwid.SimpleListWalker([columns]))) class SelectableListBox(urwid.ListBox): def __init__(self, body): super(SelectableListBox, self).__init__(body) def focus_next(self):", "mind, box, header=self.default_header, footer=self.default_footer) @property def selection_data(self): if not self.player.inventory.selection:", "self.mind = mind urwid.AttrMap(self,\"frame\") super().__init__(*args, **kargs) @property def player(self): if", "map_width(self): if self.menu_view: return self.mind.screen_size[0] - self.menu_width return self.mind.screen_size[0] @property", "= len(label) + 1 class MyButton(urwid.Button): ''' - override __init__", "{} for eqp in player.equipment_set: for b in set(list(eqp.bonus.keys()) +", "self.mind.key_map: _action = self.mind.key_map[_input] self.player.handle_input(_action) class MenuFrame(UiFrame): def __init__(self, parent,", "def create_button(label, cmd=None, align = \"center\", user_args = None, borders=True,", "_bonus.items(): if b == \"dmg_reduction\": _top += f\"Reduction:{val} \" else:", "self.disabled def attr_button(label, cmd=None, attr_map=None, focus_map = \"line\", align =", "= self.header.original_widget.box_widget self.footer_content_size = 0 @property def header_height(self): return MIN_HEADER_HEIGHT#max(MIN_HEADER_HEIGHT,", "*args) def register_event(self, event_type, callback): self.mind.register_GUI_event(event_type, callback) def disconnect(self): pass", "focus_part=\"body\") self.menu_view = True self.update_footer() self.header_widget = self.header.original_widget.box_widget self.footer_content_size =", "def restart(self): self.update_body(\"Intro\", no_title=True) def start_game_frame(self): self.bodies[\"Game\"] = 
GameFrame(self, self.mind)", "\"line\", align = \"center\", user_args = None, borders=True, disabled=False): btn", "_name = c.name[0].upper() + c.name[1:] _left += [f\"{_name:<12} \", (state,", "self._label = ButtonLabel(\"\") if borders: cols = urwid.Columns([ ('fixed', len(self.button_left),", "def __init__(self, parent, widget_list, dividechars=0): self.widget_size = len(widget_list) super(FrameColumns, self).__init__(widget_list,", "focus_next(self): try: self.focus_position += 1 if self.focus_position >= self.widget_size: self.focus_position", "False super().__init__(parent, mind, map_box) self.on_update() @property def visible_range(self): header_height =", "[\"║\\n\"] _marker_box += [\"╚\" +\"═\"*width+\"╝\"] if self.player.inventory.selection: i = self.player.inventory.selection", "rpg_game.utils import log, mod, distance from rpg_game.constants import * from", "[b for b in self.parent.bodies][self.focus_position] self.parent.update_body(new_body) except: pass class ButtonLabel(urwid.SelectableIcon):", "Intelligence +1, Hit points +2\\nSneak attack, hide and trap\", \"Bard\":", "= \"\\n - \" elif i == 11: _num =", "= map_with_attr def handle_input(self, _input): if _input == \"ctrl f\":", "on_press: urwid.connect_signal(self, 'click', on_press, user_data) self.set_label(label) self.lllavel = label #", "[(\"top\", _num)] btn = urwid.Text(_marker, align=\"center\") inv_btns.append((5, urwid.LineBox(btn))) if self.mind.screen_size", "= urwid.Columns([self._label], dividechars=0) super(urwid.Button, self).__init__(cols) self.disabled = disabled if on_press:", "and self.menu_view: self.menu.update_body(\"Inventory\") else: self.map.handle_input(_input) def select_item(self, _input): if _input.isnumeric()", "Hit points +4\\nCharge and parry\", \"Dwarf\": \"The short dwarf\\n\\nStrength +1,", "i.is_equipped: _text += [(\"green\", f\"{'Enter:equip':<14s}\")] elif i.is_equipped: _text += [(\"green\",", "+ 1 class MyButton(urwid.Button): ''' - override __init__ to use", "box, header=self.default_header, footer=self.default_footer) @property def selection_data(self): if not self.player.inventory.selection: return", "(\"unique\",\"light magenta\",\"black\"), (\"unique_line\",\"light magenta\",\"white\",\"standout\"), (\"set\",\"light green\",\"black\"), (\"set_line\",\"light green\",\"white\",\"standout\"), (\"normal\",\"white\",\"black\"), (\"positive\",\"light", "and variable width - any string, including an empty string,", "elif _input == \"0\": s_input = 9 elif _input ==", "\"[\"] elif obj.is_consumable: _marker = [\"(\", (obj.color, f\"{obj.marker[0]}\"), \")\"] else:", "h+self.parent.body_height >= len(visible_map): visible_map = visible_map[len(visible_map)-self.parent.body_height:] else: visible_map = visible_map[h:h+self.parent.body_height]", "# return # if self.disabled: # urwid.AttrMap(self, \"disabled\") # else:", "None) class GUI(UiFrame): def __init__(self, parent, mind): self.bodies = {\"Intro\"", "visible_map[h:h+self.parent.body_height] map_with_attr = [urwid.AttrMap(urwid.AttrMap(urwid.Text(line, wrap=\"clip\"), {self.player.id:\"player\"}), {p.id:\"other\" for i, p", "distance(self.player.position, ent.position) <= 3 and ent.status], key=lambda ent: distance(self.player.position, ent.position))", "def header_height(self): return MIN_HEADER_HEIGHT#max(MIN_HEADER_HEIGHT, self.mind.screen_size[1]//8) @property def menu_width(self): if self.menu_view:", "and int(_input) > 0: _input = int(_input)-1 elif _input ==", "_input): if _input.isnumeric() and int(_input) > 0: _input = 
int(_input)-1", "= \"\\n = \" if obj and obj is self.player.inventory.selection:", "= t.replace(\"_\", \" \") _name = _name[0].upper() + _name[1:] if", "@property def selection_data(self): if not self.player.inventory.selection: return urwid.Text(\"\") i =", "_left = [] for s in CHARACTERISTICS: c = getattr(player,", "class SelectableColumns(urwid.Columns): def __init__(self, widget_list, focus_column=None, dividechars=0): super().__init__(widget_list, dividechars, focus_column)", "= align if cmd: if user_args: urwid.connect_signal(btn, \"click\", cmd, user_args", "<= 3 and ent.status], key=lambda ent: distance(self.player.position, ent.position)) def update_footer(self):", "class InventoryFrame(UiFrame): def __init__(self, parent, mind): columns = urwid.Columns([urwid.Text(\"\")]) box", "urwid.Text(_right)], dividechars = 1) ] class EquipmentFrame(UiFrame): def __init__(self, parent,", "def focus_next(self): try: self.focus_position += 1 except: pass def focus_previous(self):", "_frames[idx] self.active_body = self.bodies[_title] super().__init__(parent, mind, urwid.LineBox(self.active_body, title=_title)) def on_update(self):", "if _input == \"ctrl f\": self.debug_view = not self.debug_view elif", "- override __init__ to use our ButtonLabel instead of urwid.SelectableIcon", "menu_view(self): return self._menu_view @menu_view.setter def menu_view(self, value): self._menu_view = value", "f\"{obj.marker[0]}\"), \"[\"] elif obj.is_consumable: _marker = [\"(\", (obj.color, f\"{obj.marker[0]}\"), \")\"]", "] class EquipmentFrame(UiFrame): def __init__(self, parent, mind): box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")]))", "def focus_next(self): pass def focus_previous(self): pass def update_body(self, title, no_title=False,", "i = self.player.inventory.selection X_OFFSET = 2 Y_OFFSET = 4 for", "= -1 self.debug_view = False super().__init__(parent, mind, map_box) self.on_update() @property", "[\"[\", (obj.color, f\"{obj.marker[0]}\"), \"]\"] elif obj.is_equipment and not obj.is_equipped: _marker", "self.update_footer() self.header_widget = self.header.original_widget.box_widget self.footer_content_size = 0 @property def header_height(self):", "self.disconnect() # self.mind.disconnect()#should use dispatch event def restart(self): self.update_body(\"Intro\", no_title=True)", "# def exit(self): # self.disconnect() # self.mind.disconnect()#should use dispatch event", "self.bodies[_title] super().__init__(parent, mind, urwid.LineBox(self.active_body, title=_title)) def on_update(self): self.active_body.on_update() def selectable(self):", "= (self.default_header, None) else: i = self.player.inventory.selection self.contents[\"header\"] = (urwid.Text([(i.color,", "empty string, can be set and displayed - otherwise, we", "(\"unique_line\",\"light magenta\",\"white\",\"standout\"), (\"set\",\"light green\",\"black\"), (\"set_line\",\"light green\",\"white\",\"standout\"), (\"normal\",\"white\",\"black\"), (\"positive\",\"light green\",\"black\"), (\"negative\",\"dark", "self.mind.disconnect()#should use dispatch event def restart(self): self.update_body(\"Intro\", no_title=True) def start_game_frame(self):", "attack, hide and trap\", \"Bard\": \"The noisy bard\\n\\nCharisma +1, Dexterity", "max(1, base + max_dmg) _right.append(f\"Damage {min_dmg:>3d}-{max_dmg:<3d}\\n\") _right.append(f\"Reduction {player.dmg_reduction:<3d}\\n\") _right.append(f\"Encumb \")", "MenuFrame(UiFrame): def __init__(self, parent, mind): _frames = (\"Inventory\", \"Status\", \"Equipment\",", 
"_right.append((\"red\", f\"{player.inventory.encumbrance:>2d}\")) elif player.inventory.encumbrance > player.encumbrance: _right.append((\"yellow\", f\"{player.inventory.encumbrance:>2d}\")) else: _right.append((\"white\",", "self.mind.screen_size[0] @property def body_width(self): return self.mind.screen_size[0] @property def body_height(self): return", "super(SelectableListBox, self).__init__(body) def focus_next(self): try: self.focus_position += 1 except IndexError:", "mind, urwid.ListBox(urwid.SimpleListWalker([columns]))) class SelectableListBox(urwid.ListBox): def __init__(self, body): super(SelectableListBox, self).__init__(body) def", "for y in range(width): _marker_box += [\".\"] _marker_box += [\"║\\n\"]", "urwid.AttrMap(self, None, \"line\") def selectable(self): return not self.disabled def attr_button(label,", "obj.is_equipment and not obj.is_equipped: _marker = [\"]\", (obj.color, f\"{obj.marker[0]}\"), \"[\"]", "(\"brown\",\"brown\",\"black\"), (\"white_line\",\"black\",\"white\", \"standout\"), (\"red_line\",\"dark red\",\"white\", \"standout\"), (\"green_line\",\"light green\",\"white\", \"standout\"), (\"yellow_line\",\"yellow\",\"white\",", "self.player.use_quick_item(self.player.inventory.selection) self.update_footer() elif _input == \"Q\" and self.player.inventory.selection: self.player.actions[\"drop\"].use(self.player, obj=self.player.inventory.selection)", "self.player.inventory.selection: self.contents[\"footer\"] = (self.default_footer, None) else: i = self.player.inventory.selection _text", "= [b for b in self.parent.bodies][self.focus_position] self.parent.update_body(new_body) except: pass def", "columns = urwid.Columns([urwid.Text(map_commands, wrap=\"clip\"), urwid.Text(menu_commands, wrap=\"clip\")], dividechars = 1) super().__init__(parent,", "base + max_dmg) _right.append(f\"Damage {min_dmg:>3d}-{max_dmg:<3d}\\n\") _right.append(f\"Reduction {player.dmg_reduction:<3d}\\n\") _right.append(f\"Encumb \") if", "w = max(0, y - self.parent.body_width//3) visible_map = [line[w:w+self.parent.body_width] for", "self.parent.update_body(new_body) except: pass def focus_previous(self): try: self.focus_position -= 1 if", "(\"rare\",\"yellow\",\"black\"), (\"rare_line\",\"yellow\",\"white\",\"standout\"), (\"unique\",\"light magenta\",\"black\"), (\"unique_line\",\"light magenta\",\"white\",\"standout\"), (\"set\",\"light green\",\"black\"), (\"set_line\",\"light green\",\"white\",\"standout\"),", "distance from rpg_game.constants import * from urwid import raw_display SIZE", "map_box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.map_box = map_box.body self.layer_view = -1 self.debug_view", "visible_map[len(visible_map)-self.parent.body_height:] else: visible_map = visible_map[h:h+self.parent.body_height] map_with_attr = [urwid.AttrMap(urwid.AttrMap(urwid.Text(line, wrap=\"clip\"), {self.player.id:\"player\"}),", "i == 11: _num = \"\\n = \" if obj", "if obj and obj is self.player.inventory.selection: _marker += [(\"line\", _num)]", "= False super().__init__(parent, mind, map_box) self.on_update() @property def visible_range(self): header_height", "[] for i, obj in self.player.inventory.content.items(): if obj: _size +=", "= mind _header = urwid.LineBox(urwid.BoxAdapter(SelectableListBox(urwid.SimpleFocusListWalker([urwid.Text(\"\")])), self.header_height)) self._menu_view = True self.map", "line.append(btn) walker = urwid.SimpleFocusListWalker(line) urwid.connect_signal(walker, \"modified\", self.update_description) self.listbox = 
SelectableListBox(walker)", "4 for m, pos in zip(i.in_inventory_markers, i.in_inventory_marker_positions): x, y =", "self.focus_position += 1 except: pass def focus_previous(self): try: self.focus_position -=", "borders=True, disabled=False): btn = MyButton(label, borders=borders, disabled=disabled) btn._label.align = align", "button): index = min(self.listbox.focus_position, len(self.choices)-1) choice = self.choices[index] self.mind.master.new_player(self.mind.avatar.uuid, choice)", "= (urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])), None) class GameFrame(UiFrame): def __init__(self, parent, mind): self.mind", "= [] _text += [i.eq_description, f\"\\nEncumbrance:{i.encumbrance}\\n\"] return urwid.Text(_text) def update_header(self):", "0: _input = int(_input)-1 elif _input == \"0\": s_input =", "__init__(self, parent, mind): columns = urwid.Columns([urwid.Text(\"\")]) box = urwid.ListBox(urwid.SimpleListWalker([columns])) self.box", "def attr_button(label, cmd=None, attr_map=None, focus_map = \"line\", align = \"center\",", "label, on_press=None, user_data=None, borders=True, disabled=False): self._label = ButtonLabel(\"\") if borders:", "self.player.inventory.content.items(): if obj: _size += 1 if obj.is_equipment and obj.is_equipped:", "cols = urwid.Columns([ ('fixed', len(self.button_left), urwid.Text(self.button_left)), self._label, ('fixed', len(self.button_right), urwid.Text(self.button_right))],", "2 @property def menu_view(self): return self._menu_view @menu_view.setter def menu_view(self, value):", "f\"shift+←→↑↓:dash\\n\", f\"a:attack\\n\", f\"q:pickup\\n\"] class_action_keys = [k for k, act in", "self.focus_position >= self.widget_size: self.focus_position -= self.widget_size new_body = [b for", "X_OFFSET = 2 Y_OFFSET = 4 for m, pos in", "_name = t.replace(\"_\", \" \") _name = _name[0].upper() + _name[1:]", "0)] if self.parent.parent.menu_width > 40: _name = c.name[0].upper() + c.name[1:]", "event def restart(self): self.update_body(\"Intro\", no_title=True) def start_game_frame(self): self.bodies[\"Game\"] = GameFrame(self,", "menu_view(self, value): self._menu_view = value _columns = [(self.map_width, self.map), (self.menu_width,", "__init__(self, parent, mind): box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.box = box.body super().__init__(parent,", "MenuFrame(self, mind) super().__init__(parent, mind, urwid.Columns([(self.map_width, self.map), (self.menu_width, self.menu)], focus_column=1), header=_header,", "= 9 elif _input == \"-\": _input = 10 elif", "attr_map, focus_map=focus_map) def create_button(label, cmd=None, align = \"center\", user_args =", "# self.disconnect() # self.mind.disconnect()#should use dispatch event def restart(self): self.update_body(\"Intro\",", "= self.bodies[_title] self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=_title), None) class InventoryFrame(UiFrame): def", "f\"q:pickup\\n\"] class_action_keys = [k for k, act in self.mind.key_map.items() if", "and parry\", \"Wizard\": \"The opportune wizard\\n\\nIntelligence +1\\n Fireball, teleport and", "_map = self.player.location.layer_from_entities(self.layer_view, self.debug_view) x, y, z = self.player.position w", "behaviour unchanged ''' button_left = \"[\" button_right = \"]\" def", "2: self.layer_view = -1 elif _input in self.mind.key_map: _action =", "[\"╔\" +\"═\"*width+\"╗\\n\"] for x in range(height): _marker_box += [\"║\"] for", "create_button(label, cmd=cmd, align = align, user_args = user_args, borders=borders, 
disabled=disabled)", "= urwid.ListBox(urwid.SimpleListWalker([columns])) self.box = box.body self.default_header = urwid.Text(\"0/9-= to select\\n\\n\",", "None) else: self.contents[\"body\"] = (self.active_body, None) class GUI(UiFrame): def __init__(self,", "f\"←→↑↓:move\\n\", f\"shift+←→↑↓:dash\\n\", f\"a:attack\\n\", f\"q:pickup\\n\"] class_action_keys = [k for k, act", "10 elif _input == \"=\": _input = 11 self.player.inventory.selection =", "\", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] elif self.parent.parent.menu_width > 36: _name", "player(self): if self.mind.avatar.uuid in self.mind.master.players: return self.mind.master.players[self.mind.avatar.uuid] else: return None", "obj and obj is self.player.inventory.selection: _marker += [(\"line\", _num)] else:", "= map_box.body self.layer_view = -1 self.debug_view = False super().__init__(parent, mind,", "_size def on_update(self): self.update_header() if self.footer_content_size != len(self.player.inventory.all): self.update_footer() if", "@property def header_list(self): return sorted([ent for k, ent in self.player.location.entities.items()", "in self.parent.bodies][self.focus_position] self.parent.update_body(new_body) except: pass class ButtonLabel(urwid.SelectableIcon): def set_text(self, label):", "if _input == \"tab\": self.menu_view = not self.menu_view elif _input", "return self.mind.screen_size[0] @property def body_width(self): return self.mind.screen_size[0] @property def body_height(self):", "_equipment += [urwid.Text([f\"{_name}: \", (obj.color, f\"{obj.name}\")])] else: _equipment += [urwid.Text([f\"{_name}:", "if widgets: self.header_widget.body[:] = widgets class MapFrame(UiFrame): def __init__(self, parent,", "{self.player.id:\"player\"}), {p.id:\"other\" for i, p in self.mind.master.players.items()}) for line in", "c.name[1:6] _left += [f\"{_name:<6} \", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] else:", "b == \"dmg_reduction\": _top += f\"Reduction:{val} \" else: _top +=", "self.update_footer() elif _input == self.mind.key_map[\"status-menu\"] and self.menu_view: self.menu.update_body(\"Status\") elif _input", "if b == \"dmg_reduction\": _top += f\"Reduction:{val} \" else: _top", "wizard\\n\\nIntelligence +1\\n Fireball, teleport and ice wall\", \"Thief\": \"The sneaky", "= copy.deepcopy(self.player.location.map) else: _map = self.player.location.layer_from_entities(self.layer_view, self.debug_view) x, y, z", "range(width): _marker_box += [\".\"] _marker_box += [\"║\\n\"] _marker_box += [\"╚\"", "focus_next(self): try: self.focus_position += 1 except: pass def focus_previous(self): try:", "except: pass class ButtonLabel(urwid.SelectableIcon): def set_text(self, label): ''' set_text is", "+= 1 if self.focus_position >= self.widget_size: self.focus_position -= self.widget_size new_body", "_input in self.mind.key_map: _action = self.mind.key_map[_input] self.player.handle_input(_action) class MenuFrame(UiFrame): def", "f\" ({c.mod:<+2d})\\n\"] _right = [] base = player.STR.mod weapon =", "[urwid.Text([f\"{_name}: \"])] _bonus = {} for eqp in player.equipment_set: for", "(\"player\", \"light green\", \"black\"), (\"other\", \"light blue\", \"black\"), (\"monster\", \"dark", "urwid.Text(_marker_box)), self.selection_data], dividechars=1)] def on_update(self): self.update_header() self.update_body() self.update_footer() class StatusFrame(UiFrame):", "focus_previous(self): try: self.focus_position -= 1 if self.focus_position < 0: self.focus_position", "value): self._menu_view = value _columns = 
class IntroFrame(UiFrame):
    """Class-selection screen: a list of buttons on top, the description of
    the focused class underneath."""

    def __init__(self, parent, mind):
        # urwid.Padding(urwid.BigText(('top', "Hack'n'SSH"), urwid.HalfBlock5x4Font())),
        self.choices = ("Warrior", "Dwarf", "Wizard", "Thief", "Bard")
        self.descriptions = {
            "Warrior": "The mighty warrior\n\nStrength +1, Hit points +4\nCharge and parry",
            "Dwarf": "The short dwarf\n\nStrength +1, Constitution +1, Hit points +6\nDemolish and parry",
            "Wizard": "The opportune wizard\n\nIntelligence +1\n Fireball, teleport and ice wall",
            "Thief": "The sneaky thief\n\nDexterity +1, Intelligence +1, Hit points +2\nSneak attack, hide and trap",
            "Bard": "The noisy bard\n\nCharisma +1, Dexterity +1, Intelligence +1, Hit points +2\nSing and summon",
        }
        line = []
        for c in self.choices:
            btn = attr_button(c, self.select_class)
            line.append(btn)
        walker = urwid.SimpleFocusListWalker(line)
        urwid.connect_signal(walker, "modified", self.update_description)
        self.listbox = SelectableListBox(walker)
        header = urwid.LineBox(urwid.BoxAdapter(self.listbox, len(self.choices) + 1))
        super().__init__(parent, mind,
                         urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions["Warrior"])])),
                         header=header, focus_part="header")

    def select_class(self, button):
        index = min(self.listbox.focus_position, len(self.choices) - 1)
        choice = self.choices[index]
        self.mind.master.new_player(self.mind.avatar.uuid, choice)
        self.parent.start_game_frame()

    def update_description(self):
        index = min(self.listbox.focus_position, len(self.choices) - 1)
        choice = self.choices[index]
        self.contents["body"] = (urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])), None)
"align=\"center\") super().__init__(parent, mind, box, header=self.default_header, footer=self.default_footer) @property def selection_data(self): if", "in _bonus.items(): if b == \"dmg_reduction\": _top += f\"Reduction:{val} \"", "\"light green\", \"black\"), (\"other\", \"light blue\", \"black\"), (\"monster\", \"dark red\",", "self.mind.connections: return self.mind.connections[self.mind.avatar.uuid] else: return None def handle_input(self, _input): pass", "else: # urwid.AttrMap(self, None, \"line\") def selectable(self): return not self.disabled", "\"dmg_reduction\": _top += f\"Reduction:{val} \" else: _top += f\"{b}:{val} \"", "self.player.inventory.selection: i = self.player.inventory.selection X_OFFSET = 2 Y_OFFSET = 4", "+ max_dmg) _right.append(f\"Damage {min_dmg:>3d}-{max_dmg:<3d}\\n\") _right.append(f\"Reduction {player.dmg_reduction:<3d}\\n\") _right.append(f\"Encumb \") if player.inventory.encumbrance", "[(\"green\", f\"{'Enter:unequip':<14s}\")] elif i.is_consumable: _text += [(\"green\", f\"{'Enter:use':<14s}\")] _text +=", "= widgets class MapFrame(UiFrame): def __init__(self, parent, mind): map_box =", "+ _name[1:] if obj: _equipment += [urwid.Text([f\"{_name}: \", (obj.color, f\"{obj.name}\")])]", "align=\"center\") inv_btns.append((5, urwid.LineBox(btn))) if self.mind.screen_size != (80, 24): inv_btns.append(urwid.Text(\"\\nSET TERMINAL\\nTO", "_name[1:] if obj: _equipment += [urwid.Text([f\"{_name}: \", (obj.color, f\"{obj.name}\")])] else:", "width = 8 height = 6 _marker_box = [\"╔\" +\"═\"*width+\"╗\\n\"]", "else: _left += [f\"{s:<3} \", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] _right", "visible_map] self.map_box[:] = map_with_attr def handle_input(self, _input): if _input ==", "set and displayed - otherwise, we leave Button behaviour unchanged", "min(MAX_MENU_WIDTH, (3*self.mind.screen_size[0])//7) return 0 @property def map_width(self): if self.menu_view: return", "= GameFrame(self, self.mind) self.update_body(\"Game\", no_title=True) class IntroFrame(UiFrame): def __init__(self, parent,", "[f\"{s:<3} \", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] _right = [] base", "+= 1 except: pass def focus_previous(self): try: self.focus_position -= 1", "self.selection_data], dividechars=1)] def on_update(self): self.update_header() self.update_body() self.update_footer() class StatusFrame(UiFrame): def", "def __init__(self, label, on_press=None, user_data=None, borders=True, disabled=False): self._label = ButtonLabel(\"\")", "[\".\"] _marker_box += [\"║\\n\"] _marker_box += [\"╚\" +\"═\"*width+\"╝\"] if self.player.inventory.selection:", "_right = [] base = player.STR.mod weapon = player.equipment[\"main_hand\"] if", "{i+1} \" elif i == 9: _num = \"\\n 0", "- header_height - FOOTER_HEIGHT) def on_update(self): if self.layer_view == -1:", "def dispatch_event(self, event_type, *args): self.mind.get_GUI_event(event_type, *args) def register_event(self, event_type, callback):", "class IntroFrame(UiFrame): def __init__(self, parent, mind): # urwid.Padding(urwid.BigText(('top', \"Hack\\'n\\'SSH\"), urwid.HalfBlock5x4Font())),", "in range(width): _marker_box += [\".\"] _marker_box += [\"║\\n\"] _marker_box +=", "(\"cyan\",\"light cyan\",\"black\"), (\"cyan_line\",\"light cyan\",\"white\", \"standout\"), (\"name\",\"white\",\"black\"), ] class UiFrame(urwid.Frame): def", "map_commands = [\"Map commands\\n\\n\", f\"←→↑↓:move\\n\", f\"shift+←→↑↓:dash\\n\", f\"a:attack\\n\", f\"q:pickup\\n\"] class_action_keys =", "self).__init__(widget_list, dividechars) 
self.parent = parent def focus_next(self): try: self.focus_position +=", "f\"ctrl+a:inventory\\n\", f\"ctrl+s:status\\n\", f\"ctrl+d:help\\n\", f\"ctrl+e:equipment\\n\"] columns = urwid.Columns([urwid.Text(map_commands, wrap=\"clip\"), urwid.Text(menu_commands, wrap=\"clip\")],", "= _frames[idx] self.active_body = self.bodies[_title] super().__init__(parent, mind, urwid.LineBox(self.active_body, title=_title)) def", "self.mind.screen_size[1] - self.header_height - FOOTER_HEIGHT - 2 @property def menu_view(self):", "super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([columns]))) class SelectableListBox(urwid.ListBox): def __init__(self, body): super(SelectableListBox, self).__init__(body)", "_columns = [(self.map_width, self.map), (self.menu_width, self.menu)] self.contents[\"body\"] = (urwid.Columns(_columns, focus_column=1),", "_marker += [(\"top\", _num)] btn = urwid.Text(_marker, align=\"center\") inv_btns.append((5, urwid.LineBox(btn)))", "event_type, *args): self.mind.get_GUI_event(event_type, *args) def register_event(self, event_type, callback): self.mind.register_GUI_event(event_type, callback)", "\"Thief\", \"Bard\") self.descriptions = {\"Warrior\": \"The mighty warrior\\n\\nStrength +1, Hit", "_size = 0 inv_btns = [] for i, obj in", "self.contents[\"body\"] = (urwid.Columns(_columns, focus_column=1), None) @property def header_list(self): return sorted([ent", "TERMINAL\\nTO 80X24\", align=\"center\")) self.contents[\"footer\"] = (SelectableColumns(inv_btns, dividechars=0), None) self.footer_content_size =", "({c.mod:<+2d})\\n\"] else: _left += [f\"{s:<3} \", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"]", "if not weapon: min_dmg, max_dmg = (1, 4) else: number,", "b in self.parent.bodies][self.focus_position] self.parent.update_body(new_body) except: pass class ButtonLabel(urwid.SelectableIcon): def set_text(self,", "be set and displayed - otherwise, we leave Button behaviour", "\") _name = _name[0].upper() + _name[1:] if obj: _equipment +=", "handle_input(self, _input): pass def on_update(self): pass def dispatch_event(self, event_type, *args):", "for b in self.parent.bodies][self.focus_position] self.parent.update_body(new_body) except: pass def focus_previous(self): try:", "self.update_body(\"Intro\", no_title=True) def start_game_frame(self): self.bodies[\"Game\"] = GameFrame(self, self.mind) self.update_body(\"Game\", no_title=True)", "leave Button behaviour unchanged ''' button_left = \"[\" button_right =", "for p in self.header_list: widgets.append(urwid.AttrMap(urwid.AttrMap(urwid.Text(p.status, wrap=\"clip\"), {self.player.id:\"player\"}), {p.id:\"other\" for i,", "= value _columns = [(self.map_width, self.map), (self.menu_width, self.menu)] self.contents[\"body\"] =", "+ int(c.temp_bonus > 0)] if self.parent.parent.menu_width > 40: _name =", "scr.get_cols_rows() MIN_HEADER_HEIGHT = 3 MAX_MENU_WIDTH = 48 FOOTER_HEIGHT = 4", "24): self.update_footer() self.map.on_update() if self.menu_view: self.menu.on_update() def handle_input(self, _input): if", "columns = urwid.Columns([urwid.Text(\"\")]) box = urwid.ListBox(urwid.SimpleListWalker([columns])) self.box = box.body self.default_header", "box = urwid.ListBox(urwid.SimpleListWalker([columns])) self.box = box.body self.default_header = urwid.Text(\"0/9-= to", "== \"tab\": self.menu_view = not self.menu_view elif _input == \"enter\"", "(number * 1, number * value) min_dmg = max(1, base", "MAX_MENU_WIDTH = 48 FOOTER_HEIGHT = 4 PALETTE = [ (\"line\",", "width - any string, including an empty string, 
    def update_footer(self):
        _size = 0
        inv_btns = []
        for i, obj in self.player.inventory.content.items():
            if obj:
                _size += 1
                if obj.is_equipment and obj.is_equipped:
                    _marker = ["[", (obj.color, f"{obj.marker[0]}"), "]"]
                elif obj.is_equipment and not obj.is_equipped:
                    _marker = ["]", (obj.color, f"{obj.marker[0]}"), "["]
                elif obj.is_consumable:
                    _marker = ["(", (obj.color, f"{obj.marker[0]}"), ")"]
                else:
                    _marker = [f" {obj.marker[0]} "]
            else:
                _marker = [" "]
            # Slots 1-9, 0, - and = label inventory positions 0-11.
            if i < 9:
                _num = f"\n {i+1} "
            elif i == 9:
                _num = "\n 0 "
            elif i == 10:
                _num = "\n - "
            elif i == 11:
                _num = "\n = "
            if obj and obj is self.player.inventory.selection:
                _marker += [("line", _num)]
            else:
                _marker += [("top", _num)]
            btn = urwid.Text(_marker, align="center")
            inv_btns.append((5, urwid.LineBox(btn)))
        if self.mind.screen_size != (80, 24):
            inv_btns.append(urwid.Text("\nSET TERMINAL\nTO 80X24", align="center"))
        self.contents["footer"] = (SelectableColumns(inv_btns, dividechars=0), None)
        self.footer_content_size = _size

    def on_update(self):
        self.update_header()
        if self.footer_content_size != len(self.player.inventory.all):
            self.update_footer()
        if self.mind.screen_size != (80, 24):
            self.update_footer()
        self.map.on_update()
        if self.menu_view:
            self.menu.on_update()

    def handle_input(self, _input):
        if _input == "tab":
            self.menu_view = not self.menu_view
        elif _input == "enter" and self.player.inventory.selection:
            self.player.use_quick_item(self.player.inventory.selection)
            self.update_footer()
        elif _input == "Q" and self.player.inventory.selection:
            self.player.actions["drop"].use(self.player, obj=self.player.inventory.selection)
            self.update_footer()
        elif _input.isnumeric() or _input in ("-", "="):
            self.select_item(_input)
            self.update_footer()
        elif _input == self.mind.key_map["status-menu"] and self.menu_view:
            self.menu.update_body("Status")
        elif _input == self.mind.key_map["help-menu"] and self.menu_view:
            self.menu.update_body("Help")
        elif _input == self.mind.key_map["equipment-menu"] and self.menu_view:
            self.menu.update_body("Equipment")
        elif _input == self.mind.key_map["inventory-menu"] and self.menu_view:
            self.menu.update_body("Inventory")
        else:
            self.map.handle_input(_input)

    def select_item(self, _input):
        if _input.isnumeric() and int(_input) > 0:
            _input = int(_input) - 1
        elif _input == "0":
            _input = 9
        elif _input == "-":
            _input = 10
        elif _input == "=":
            _input = 11
        self.player.inventory.selection = self.player.inventory.get(_input)

    def update_header(self):
        widgets = []
        for p in self.header_list:
            widgets.append(urwid.AttrMap(urwid.AttrMap(urwid.Text(p.status, wrap="clip"),
                                                       {self.player.id: "player"}),
                                         {p.id: "other" for i, p in self.mind.master.players.items()}))
        if widgets:
            self.header_widget.body[:] = widgets
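# Input routing: GameFrame consumes the global hotkeys above (tab, enter, Q,
# the 0-9/-/= quick slots and the ctrl menu shortcuts from mind.key_map) and
# falls through to MapFrame.handle_input below, which turns the remaining
# keys into player actions.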
class MapFrame(UiFrame):
    """Scrolling view of the current location, windowed around the player."""

    def __init__(self, parent, mind):
        map_box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
        self.map_box = map_box.body
        self.layer_view = -1
        self.debug_view = False
        super().__init__(parent, mind, map_box)
        self.on_update()

    @property
    def visible_range(self):
        header_height = self.parent.header_height + 2
        tot_rows = self.mind.screen_size[1]
        return (tot_rows - header_height - FOOTER_HEIGHT)

    def on_update(self):
        if self.layer_view == -1:
            _map = copy.deepcopy(self.player.location.map)
        else:
            _map = self.player.location.layer_from_entities(self.layer_view, self.debug_view)
        x, y, z = self.player.position
        # Horizontal window: keep the player one third from the left edge.
        w = max(0, y - self.parent.body_width // 3)
        visible_map = [line[w:w + self.parent.body_width] for line in _map]
        # Vertical window: keep the player centered, clamped at the map edges.
        h = max(0, x - self.parent.body_height // 2)
        if h + self.parent.body_height >= len(visible_map):
            visible_map = visible_map[len(visible_map) - self.parent.body_height:]
        else:
            visible_map = visible_map[h:h + self.parent.body_height]
        map_with_attr = [urwid.AttrMap(urwid.AttrMap(urwid.Text(line, wrap="clip"),
                                                     {self.player.id: "player"}),
                                       {p.id: "other" for i, p in self.mind.master.players.items()})
                         for line in visible_map]
        self.map_box[:] = map_with_attr

    def handle_input(self, _input):
        if _input == "ctrl f":
            self.debug_view = not self.debug_view
        elif _input == "ctrl v":
            # Cycle through entity layers -1 (full map), 0, 1, 2.
            self.layer_view = self.layer_view + 1
            if self.layer_view > 2:
                self.layer_view = -1
        elif _input in self.mind.key_map:
            _action = self.mind.key_map[_input]
            self.player.handle_input(_action)
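# Camera example (numbers assumed for illustration): with body_width = 80
# and the player at column y = 50, w = max(0, 50 - 80//3) = 24, so columns
# 24-103 are rendered and the player sits 26 cells from the left edge. With
# body_height = 18 and the player at row x = 5, h = max(0, 5 - 9) = 0, i.e.
# the window clamps to the top of the map instead of centering.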
"zip(i.in_inventory_markers, i.in_inventory_marker_positions): x, y = pos _marker_box[(x+X_OFFSET)*(width+2)+y+Y_OFFSET] = (i.color, m)", "focus_previous(self): try: self.focus_position -= 1 except: pass class FrameColumns(urwid.Columns): def", "return not self.disabled def attr_button(label, cmd=None, attr_map=None, focus_map = \"line\",", "+= [f\"{_name:<6} \", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] else: _left +=", "+1, Intelligence +1, Hit points +2\\nSing and summon\"} line =", "body_height(self): return self.mind.screen_size[1] - self.header_height - FOOTER_HEIGHT - 2 @property", "= (1, 4) else: number, value = weapon.dmg min_dmg, max_dmg", "def on_update(self): self.update_header() self.update_body() self.update_footer() class StatusFrame(UiFrame): def __init__(self, parent,", "== EXTRA_ENCUMBRANCE_MULTI*player.encumbrance: _right.append((\"red\", f\"{player.inventory.encumbrance:>2d}\")) elif player.inventory.encumbrance > player.encumbrance: _right.append((\"yellow\", f\"{player.inventory.encumbrance:>2d}\"))", "MapFrame(self, mind) self.menu = MenuFrame(self, mind) super().__init__(parent, mind, urwid.Columns([(self.map_width, self.map),", "self.mind.screen_size != (80, 24): inv_btns.append(urwid.Text(\"\\nSET TERMINAL\\nTO 80X24\", align=\"center\")) self.contents[\"footer\"] =", "def player(self): if self.mind.avatar.uuid in self.mind.master.players: return self.mind.master.players[self.mind.avatar.uuid] else: return", "equip':<14s}\")] elif not i.is_equipped: _text += [(\"green\", f\"{'Enter:equip':<14s}\")] elif i.is_equipped:", "green\",\"black\"), (\"set_line\",\"light green\",\"white\",\"standout\"), (\"normal\",\"white\",\"black\"), (\"positive\",\"light green\",\"black\"), (\"negative\",\"dark red\",\"black\"), (\"white\",\"white\",\"black\"), (\"disabled\",\"dark", "self.choices[index] self.mind.master.new_player(self.mind.avatar.uuid, choice) self.parent.start_game_frame() def update_description(self): index = min(self.listbox.focus_position, len(self.choices)-1)", "# self.mind.disconnect()#should use dispatch event def restart(self): self.update_body(\"Intro\", no_title=True) def", "def on_update(self): pass def dispatch_event(self, event_type, *args): self.mind.get_GUI_event(event_type, *args) def", "player.equipment_set: for b in set(list(eqp.bonus.keys()) + list(eqp.set_bonus.keys())): val = player.full_eqp_bonus(eqp,", "return self._disabled # @disabled.setter # def disabled(self, value): # if", "1 except IndexError: pass def focus_previous(self): try: self.focus_position -= 1", "user_args = user_args, borders=borders, disabled=disabled) return urwid.AttrMap(btn, attr_map, focus_map=focus_map) def", "def connection(self): if self.mind.avatar.uuid in self.mind.connections: return self.mind.connections[self.mind.avatar.uuid] else: return", "obj=self.player.inventory.selection) self.update_footer() elif _input.isnumeric() or _input in (\"-\", \"=\"): self.select_item(_input)", "c in self.choices: btn = attr_button(c, self.select_class) line.append(btn) walker =", "# urwid.Padding(urwid.BigText(('top', \"Hack\\'n\\'SSH\"), urwid.HalfBlock5x4Font())), self.choices = (\"Warrior\", \"Dwarf\", \"Wizard\", \"Thief\",", "f\"ctrl+d:help\\n\", f\"ctrl+e:equipment\\n\"] columns = urwid.Columns([urwid.Text(map_commands, wrap=\"clip\"), urwid.Text(menu_commands, wrap=\"clip\")], dividechars =", "(\"white\",\"white\",\"black\"), (\"disabled\",\"dark red\",\"black\"), (\"red\",\"dark red\",\"black\"), (\"green\",\"light green\",\"black\"), 
(\"yellow\",\"yellow\",\"black\"), (\"brown\",\"brown\",\"black\"), (\"white_line\",\"black\",\"white\",", "align=\"center\"), None) def update_footer(self): if not self.player.inventory.selection: self.contents[\"footer\"] = (self.default_footer,", "self.mind.key_map[\"status-menu\"] and self.menu_view: self.menu.update_body(\"Status\") elif _input == self.mind.key_map[\"help-menu\"] and self.menu_view:", "self.parent.header_height + 2 tot_rows = self.mind.screen_size[1] return (tot_rows - header_height", "* 1, number * value) min_dmg = max(1, base +", "self.footer_content_size != len(self.player.inventory.all): self.update_footer() if self.mind.screen_size != (80, 24): self.update_footer()", "(self.active_body, None) class GUI(UiFrame): def __init__(self, parent, mind): self.bodies =", "[urwid.Text(_top), urwid.Columns([urwid.Text(_left), urwid.Text(_right)], dividechars = 1) ] class EquipmentFrame(UiFrame): def", "wrap=\"clip\")], dividechars = 1) super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([columns]))) class SelectableListBox(urwid.ListBox): def", "pass def on_update(self): pass def dispatch_event(self, event_type, *args): self.mind.get_GUI_event(event_type, *args)", "elif _input == \"Q\" and self.player.inventory.selection: self.player.actions[\"drop\"].use(self.player, obj=self.player.inventory.selection) self.update_footer() elif", "def __init__(self, parent, mind): columns = urwid.Columns([urwid.Text(\"\")]) box = urwid.ListBox(urwid.SimpleListWalker([columns]))", "= 4 PALETTE = [ (\"line\", 'black', 'white', \"standout\"), (\"top\",\"white\",\"black\"),", "(state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] else: _left += [f\"{s:<3} \", (state,", "player.inventory.encumbrance == EXTRA_ENCUMBRANCE_MULTI*player.encumbrance: _right.append((\"red\", f\"{player.inventory.encumbrance:>2d}\")) elif player.inventory.encumbrance > player.encumbrance: _right.append((\"yellow\",", "def menu_width(self): if self.menu_view: return min(MAX_MENU_WIDTH, (3*self.mind.screen_size[0])//7) return 0 @property", "(obj.color, f\"{obj.marker[0]}\"), \")\"] else: _marker = [f\" {obj.marker[0]} \"] else:", "if h+self.parent.body_height >= len(visible_map): visible_map = visible_map[len(visible_map)-self.parent.body_height:] else: visible_map =", "\"] if i < 9: _num = f\"\\n {i+1} \"", "i = self.player.inventory.selection _text = [] if not i.requisites(self.player): _text", "s_input = 9 elif _input == \"-\": _input = 10", "@property def body_width(self): return self.mind.screen_size[0] @property def body_height(self): return self.mind.screen_size[1]", "red\",\"black\"), (\"green\",\"light green\",\"black\"), (\"yellow\",\"yellow\",\"black\"), (\"brown\",\"brown\",\"black\"), (\"white_line\",\"black\",\"white\", \"standout\"), (\"red_line\",\"dark red\",\"white\", \"standout\"),", "def on_update(self): self.update_header() if self.footer_content_size != len(self.player.inventory.all): self.update_footer() if self.mind.screen_size", "parent def focus_next(self): try: self.focus_position += 1 if self.focus_position >=", "= (urwid.Columns(_columns, focus_column=1), None) @property def header_list(self): return sorted([ent for", "x, y, z = player.position _top = f\"{player.name:<12s} {player.game_class.name:<10s}\\nLev:{player.level:<2d} Exp:{player.exp:<4d}", "on_update(self): pass def dispatch_event(self, event_type, *args): self.mind.get_GUI_event(event_type, *args) def register_event(self,", "bard\\n\\nCharisma +1, Dexterity +1, Intelligence +1, Hit points +2\\nSing and", ">= 
len(visible_map): visible_map = visible_map[len(visible_map)-self.parent.body_height:] else: visible_map = visible_map[h:h+self.parent.body_height] map_with_attr", "\"\\n - \" elif i == 11: _num = \"\\n", "= self.player.inventory.selection _text = [] if not i.requisites(self.player): _text +=", "(urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])), None) class GameFrame(UiFrame): def __init__(self, parent, mind): self.mind =", "_input == \"=\": _input = 11 self.player.inventory.selection = self.player.inventory.get(_input) def", "urwid.connect_signal(btn, \"click\", cmd, user_args = user_args) else: urwid.connect_signal(btn, \"click\", cmd)", "\"Dwarf\": \"The short dwarf\\n\\nStrength +1, Constitution +1, Hit points +6\\nDemolish", "self.mind.screen_size[1] return (tot_rows - header_height - FOOTER_HEIGHT) def on_update(self): if", "urwid import time, os, copy from rpg_game.utils import log, mod,", "_input = int(_input)-1 elif _input == \"0\": s_input = 9", "def focus_previous(self): try: self.focus_position -= 1 if self.focus_position < 0:", "return self.mind.screen_size[0] @property def body_height(self): return self.mind.screen_size[1] - self.header_height -", "= user_args, borders=borders, disabled=disabled) return urwid.AttrMap(btn, attr_map, focus_map=focus_map) def create_button(label,", "_num = \"\\n 0 \" elif i == 10: _num", "max(1, base + min_dmg) max_dmg = max(1, base + max_dmg)", "mind): # urwid.Padding(urwid.BigText(('top', \"Hack\\'n\\'SSH\"), urwid.HalfBlock5x4Font())), self.choices = (\"Warrior\", \"Dwarf\", \"Wizard\",", "= True self.map = MapFrame(self, mind) self.menu = MenuFrame(self, mind)", "{p.id:\"other\" for i, p in self.mind.master.players.items()})) if widgets: self.header_widget.body[:] =", "self.contents[\"footer\"] = (self.default_footer, None) else: i = self.player.inventory.selection _text =", "urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[\"Warrior\"])])), header=header, focus_part=\"header\") def select_class(self, button): index = min(self.listbox.focus_position, len(self.choices)-1)", "f\"{obj.name}\")])] else: _equipment += [urwid.Text([f\"{_name}: \"])] _bonus = {} for", "# else: # urwid.AttrMap(self, None, \"line\") def selectable(self): return not", "Y_OFFSET = 4 for m, pos in zip(i.in_inventory_markers, i.in_inventory_marker_positions): x,", "f\"{obj.marker[0]}\"), \"]\"] elif obj.is_equipment and not obj.is_equipped: _marker = [\"]\",", "tot_rows = self.mind.screen_size[1] return (tot_rows - header_height - FOOTER_HEIGHT) def", "EquipmentFrame(UiFrame): def __init__(self, parent, mind): box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.box =", "SelectableColumns(urwid.Columns): def __init__(self, widget_list, focus_column=None, dividechars=0): super().__init__(widget_list, dividechars, focus_column) def", "def selection_data(self): if not self.player.inventory.selection: return urwid.Text(\"\") i = self.player.inventory.selection", "self.layer_view + 1 if self.layer_view > 2: self.layer_view = -1", "_input == \"0\": s_input = 9 elif _input == \"-\":", "callback): self.mind.register_GUI_event(event_type, callback) def disconnect(self): pass def restart(self): pass def", "[f\" {obj.marker[0]} \"] else: _marker = [f\" \"] if i", "(80, 24): self.update_footer() self.map.on_update() if self.menu_view: self.menu.on_update() def handle_input(self, _input):", "+= [\"║\\n\"] _marker_box += [\"╚\" +\"═\"*width+\"╝\"] if self.player.inventory.selection: i =", "mind): self.mind = mind _header 
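# Indexing example for update_body above: _marker_box is a flat markup list
# in which the top border is one element and every row adds width + 2 = 10
# elements ("║", eight dots, "║\n"). A marker at relative position (0, 0)
# therefore lands at (0+2)*10 + 0 + 4 = 24, which is dot-row 2, column 2,
# so X_OFFSET/Y_OFFSET roughly center small item shapes in the box.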
class StatusFrame(UiFrame):
    """Status tab: name/class/level line, characteristics on the left and
    derived combat numbers on the right."""

    def __init__(self, parent, mind):
        box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
        self.box = box.body
        super().__init__(parent, mind, box)

    def on_update(self):
        player = self.player
        x, y, z = player.position
        _top = (f"{player.name:<12s} {player.game_class.name:<10s}\n"
                f"Lev:{player.level:<2d} Exp:{player.exp:<4d} {player.location.name}@({x},{y})\n")
        _left = []
        for s in CHARACTERISTICS:
            c = getattr(player, s)
            # Index -1/0/+1 picks the color for a debuffed/plain/buffed stat.
            state = ["normal", "positive", "negative"][-int(c.temp_bonus < 0) + int(c.temp_bonus > 0)]
            if self.parent.parent.menu_width > 40:
                _name = c.name[0].upper() + c.name[1:]
                _left += [f"{_name:<12} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
            elif self.parent.parent.menu_width > 36:
                _name = c.name[0].upper() + c.name[1:6]
                _left += [f"{_name:<6} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
            else:
                _left += [f"{s:<3} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
        _right = []
        base = player.STR.mod
        weapon = player.equipment["main_hand"]
        if not weapon:
            min_dmg, max_dmg = (1, 4)
        else:
            number, value = weapon.dmg
            min_dmg, max_dmg = (number * 1, number * value)
        min_dmg = max(1, base + min_dmg)
        max_dmg = max(1, base + max_dmg)
        _right.append(f"Damage {min_dmg:>3d}-{max_dmg:<3d}\n")
        _right.append(f"Reduction {player.dmg_reduction:<3d}\n")
        _right.append("Encumb ")
        if player.inventory.encumbrance == EXTRA_ENCUMBRANCE_MULTI * player.encumbrance:
            _right.append(("red", f"{player.inventory.encumbrance:>2d}"))
        elif player.inventory.encumbrance > player.encumbrance:
            _right.append(("yellow", f"{player.inventory.encumbrance:>2d}"))
        else:
            _right.append(("white", f"{player.inventory.encumbrance:>2d}"))
        _right.append(f"/{player.encumbrance:<2d}\n")
        _right.append(f"Speed {player.movement_speed}\n")
        _right.append(f"Monsterized {player.MP:<2d}\n")
        self.box[:] = [urwid.Text(_top), urwid.Columns([urwid.Text(_left), urwid.Text(_right)], dividechars=1)]
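# Damage example (values assumed): with STR mod +2 and a main-hand weapon
# whose dmg = (2, 6) (i.e. 2d6), min_dmg = max(1, 2 + 2*1) = 4 and
# max_dmg = max(1, 2 + 2*6) = 14; with no weapon the unarmed (1, 4) range
# is used before the modifier is applied.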
class EquipmentFrame(UiFrame):
    """Equipment tab: one line per slot plus the summed (set) bonuses."""

    def __init__(self, parent, mind):
        box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
        self.box = box.body
        super().__init__(parent, mind, box)

    def on_update(self):
        player = self.player
        _equipment = []
        for t, obj in player.equipment.items():
            _name = t.replace("_", " ")
            _name = _name[0].upper() + _name[1:]
            if obj:
                _equipment += [urwid.Text([f"{_name}: ", (obj.color, f"{obj.name}")])]
            else:
                _equipment += [urwid.Text([f"{_name}: "])]
        _bonus = {}
        for eqp in player.equipment_set:
            for b in set(list(eqp.bonus.keys()) + list(eqp.set_bonus.keys())):
                val = player.full_eqp_bonus(eqp, b)
                if b not in _bonus:
                    _bonus[b] = val
                else:
                    _bonus[b] += val
        _top = ""
        for b, val in _bonus.items():
            if b == "dmg_reduction":
                _top += f"Reduction:{val} "
            else:
                _top += f"{b}:{val} "
        _top += "\n"
        self.box[:] = [urwid.Text(_top)] + _equipment
self.menu_view: self.menu.update_body(\"Status\") elif _input ==", "if not self.player.inventory.selection: return urwid.Text(\"\") i = self.player.inventory.selection _text =", "menu_width(self): if self.menu_view: return min(MAX_MENU_WIDTH, (3*self.mind.screen_size[0])//7) return 0 @property def", "and not obj.is_equipped: _marker = [\"]\", (obj.color, f\"{obj.marker[0]}\"), \"[\"] elif", "to use our ButtonLabel instead of urwid.SelectableIcon - make button_left", "class_action_keys[i] map_commands.append(f\"{k}:{self.player.class_actions[act].description.lower()}\\n\") menu_commands = [\"Menu commands\\n\\n\", f\"tab:open/close\\n\",f\"0/9-=:select item\\n\", f\"ctrl+p:respawn\\n\", f\"ctrl+a:inventory\\n\",", "pass class SelectableColumns(urwid.Columns): def __init__(self, widget_list, focus_column=None, dividechars=0): super().__init__(widget_list, dividechars,", "m, pos in zip(i.in_inventory_markers, i.in_inventory_marker_positions): x, y = pos _marker_box[(x+X_OFFSET)*(width+2)+y+Y_OFFSET]", "= [ (\"line\", 'black', 'white', \"standout\"), (\"top\",\"white\",\"black\"), (\"frame\",\"white\",\"white\"), (\"player\", \"light", "weapon: min_dmg, max_dmg = (1, 4) else: number, value =", "\"Wizard\", \"Thief\", \"Bard\") self.descriptions = {\"Warrior\": \"The mighty warrior\\n\\nStrength +1,", "(\"common_line\",\"black\",\"white\",\"standout\"), (\"uncommon\",\"dark cyan\",\"black\"), (\"uncommon_line\",\"dark cyan\",\"white\",\"standout\"), (\"rare\",\"yellow\",\"black\"), (\"rare_line\",\"yellow\",\"white\",\"standout\"), (\"unique\",\"light magenta\",\"black\"), (\"unique_line\",\"light", "if self._disabled == value: # return # if self.disabled: #", "on_update(self): player = self.player _equipment = [] for t, obj", "def disabled(self): # return self._disabled # @disabled.setter # def disabled(self,", "> 36: _name = c.name[0].upper() + c.name[1:6] _left += [f\"{_name:<6}", "== -1: _map = copy.deepcopy(self.player.location.map) else: _map = self.player.location.layer_from_entities(self.layer_view, self.debug_view)", "= [f\" \"] if i < 9: _num = f\"\\n", "@property def connection(self): if self.mind.avatar.uuid in self.mind.connections: return self.mind.connections[self.mind.avatar.uuid] else:", "(SelectableColumns(inv_btns, dividechars=0), None) self.footer_content_size = _size def on_update(self): self.update_header() if", "dispatch event def restart(self): self.update_body(\"Intro\", no_title=True) def start_game_frame(self): self.bodies[\"Game\"] =", "len(self.choices)+1)) super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[\"Warrior\"])])), header=header, focus_part=\"header\") def select_class(self, button): index", "visible_range(self): header_height = self.parent.header_height + 2 tot_rows = self.mind.screen_size[1] return", "is invoked by Button.set_label ''' self.__super.set_text(label) self._cursor_position = len(label) +", "MyButton(urwid.Button): ''' - override __init__ to use our ButtonLabel instead", "boxed=False): self.active_body = self.bodies[title] if boxed: if no_title: self.contents[\"body\"] =", "= (self.default_footer, None) else: i = self.player.inventory.selection _text = []", "+4\\nCharge and parry\", \"Dwarf\": \"The short dwarf\\n\\nStrength +1, Constitution +1,", "= [] base = player.STR.mod weapon = player.equipment[\"main_hand\"] if not", "= -1 elif _input in self.mind.key_map: _action = self.mind.key_map[_input] self.player.handle_input(_action)", "def __init__(self, parent, mind): self.bodies = {\"Intro\" : 
class SelectableListBox(urwid.ListBox):
    def __init__(self, body):
        super(SelectableListBox, self).__init__(body)

    def focus_next(self):
        try:
            self.focus_position += 1
        except IndexError:
            pass

    def focus_previous(self):
        try:
            self.focus_position -= 1
        except IndexError:
            pass


class SelectableColumns(urwid.Columns):
    def __init__(self, widget_list, focus_column=None, dividechars=0):
        super().__init__(widget_list, dividechars, focus_column)

    def focus_next(self):
        try:
            self.focus_position += 1
        except IndexError:
            pass

    def focus_previous(self):
        try:
            self.focus_position -= 1
        except IndexError:
            pass


class FrameColumns(urwid.Columns):
    """Columns that cycle focus across the parent frame's bodies."""

    def __init__(self, parent, widget_list, dividechars=0):
        self.widget_size = len(widget_list)
        super(FrameColumns, self).__init__(widget_list, dividechars)
        self.parent = parent

    def focus_next(self):
        try:
            self.focus_position += 1
            if self.focus_position >= self.widget_size:
                self.focus_position -= self.widget_size
            new_body = [b for b in self.parent.bodies][self.focus_position]
            self.parent.update_body(new_body)
        except IndexError:
            pass

    def focus_previous(self):
        try:
            self.focus_position -= 1
            if self.focus_position < 0:
                self.focus_position += self.widget_size
            new_body = [b for b in self.parent.bodies][self.focus_position]
            self.parent.update_body(new_body)
        except IndexError:
            pass
self.menu.update_body(\"Equipment\") elif _input == self.mind.key_map[\"inventory-menu\"] and self.menu_view: self.menu.update_body(\"Inventory\")", "__init__(self, parent, mind, *args, **kargs): self.parent = parent self.mind =", "f\"{'Enter:unequip':<14s}\")] elif i.is_consumable: _text += [(\"green\", f\"{'Enter:use':<14s}\")] _text += [(\"yellow\",", "focus_column=1), header=_header, footer=None, focus_part=\"body\") self.menu_view = True self.update_footer() self.header_widget =", "[\"]\", (obj.color, f\"{obj.marker[0]}\"), \"[\"] elif obj.is_consumable: _marker = [\"(\", (obj.color,", "f\"{b}:{val} \" _top += \"\\n\" self.box[:] = [urwid.Text(_top)] + _equipment", "no_title=True) def start_game_frame(self): self.bodies[\"Game\"] = GameFrame(self, self.mind) self.update_body(\"Game\", no_title=True) class", "user_args: urwid.connect_signal(btn, \"click\", cmd, user_args = user_args) else: urwid.connect_signal(btn, \"click\",", "+ 2 tot_rows = self.mind.screen_size[1] return (tot_rows - header_height -", "1 if self.focus_position >= self.widget_size: self.focus_position -= self.widget_size new_body =", "choice = self.choices[index] self.mind.master.new_player(self.mind.avatar.uuid, choice) self.parent.start_game_frame() def update_description(self): index =", "[] for c in self.choices: btn = attr_button(c, self.select_class) line.append(btn)", "= (\"Warrior\", \"Dwarf\", \"Wizard\", \"Thief\", \"Bard\") self.descriptions = {\"Warrior\": \"The", "None) @property def header_list(self): return sorted([ent for k, ent in", "if self.menu_view: return self.mind.screen_size[0] - self.menu_width return self.mind.screen_size[0] @property def", "(i.color, m) self.box[:] = [urwid.Columns([(width+2, urwid.Text(_marker_box)), self.selection_data], dividechars=1)] def on_update(self):", "if self.mind.avatar.uuid in self.mind.master.players: return self.mind.master.players[self.mind.avatar.uuid] else: return None @property", "max(0, x - self.parent.body_height//2) if h+self.parent.body_height >= len(visible_map): visible_map =", "not self.player.inventory.selection: return urwid.Text(\"\") i = self.player.inventory.selection _text = []", "= \"[\" button_right = \"]\" def __init__(self, label, on_press=None, user_data=None,", "cyan\",\"white\", \"standout\"), (\"name\",\"white\",\"black\"), ] class UiFrame(urwid.Frame): def __init__(self, parent, mind,", "@property # def disabled(self): # return self._disabled # @disabled.setter #", "elif self.parent.parent.menu_width > 36: _name = c.name[0].upper() + c.name[1:6] _left", "(\"uncommon_line\",\"dark cyan\",\"white\",\"standout\"), (\"rare\",\"yellow\",\"black\"), (\"rare_line\",\"yellow\",\"white\",\"standout\"), (\"unique\",\"light magenta\",\"black\"), (\"unique_line\",\"light magenta\",\"white\",\"standout\"), (\"set\",\"light green\",\"black\"),", "y - self.parent.body_width//3) visible_map = [line[w:w+self.parent.body_width] for line in _map]", "parent self.mind = mind urwid.AttrMap(self,\"frame\") super().__init__(*args, **kargs) @property def player(self):", "if self.layer_view > 2: self.layer_view = -1 elif _input in", "s) state = [\"normal\", \"positive\", \"negative\"][-int(c.temp_bonus < 0) + int(c.temp_bonus", "{player.location.name}@({x},{y})\\n\" _left = [] for s in CHARACTERISTICS: c =", "+1, Intelligence +1, Hit points +2\\nSneak attack, hide and trap\",", "handle_input(self, _input): if _input == \"tab\": self.menu_view = not self.menu_view", "_input == self.mind.key_map[\"help-menu\"] and self.menu_view: self.menu.update_body(\"Help\") 
elif _input == self.mind.key_map[\"equipment-menu\"]", "self.menu)] self.contents[\"body\"] = (urwid.Columns(_columns, focus_column=1), None) @property def header_list(self): return", "import log, mod, distance from rpg_game.constants import * from urwid", "_marker_box += [\"╚\" +\"═\"*width+\"╝\"] if self.player.inventory.selection: i = self.player.inventory.selection X_OFFSET", "max(0, y - self.parent.body_width//3) visible_map = [line[w:w+self.parent.body_width] for line in", "parent, mind): self.bodies = {\"Intro\" : IntroFrame(self, mind)} self.active_body =", "2 Y_OFFSET = 4 for m, pos in zip(i.in_inventory_markers, i.in_inventory_marker_positions):", "in self.choices: btn = attr_button(c, self.select_class) line.append(btn) walker = urwid.SimpleFocusListWalker(line)", "= 0 @property def header_height(self): return MIN_HEADER_HEIGHT#max(MIN_HEADER_HEIGHT, self.mind.screen_size[1]//8) @property def", "scr=raw_display.Screen(): scr.get_cols_rows() MIN_HEADER_HEIGHT = 3 MAX_MENU_WIDTH = 48 FOOTER_HEIGHT =", "or _input in (\"-\", \"=\"): self.select_item(_input) self.update_footer() elif _input ==", "if not self.player.inventory.selection: self.contents[\"header\"] = (self.default_header, None) else: i =", "self.parent.bodies][self.focus_position] self.parent.update_body(new_body) except: pass def focus_previous(self): try: self.focus_position -= 1", "inv_btns.append((5, urwid.LineBox(btn))) if self.mind.screen_size != (80, 24): inv_btns.append(urwid.Text(\"\\nSET TERMINAL\\nTO 80X24\",", "btn = urwid.Text(_marker, align=\"center\") inv_btns.append((5, urwid.LineBox(btn))) if self.mind.screen_size != (80,", "= urwid.Text(\"0/9-= to select\\n\\n\", align=\"center\") self.default_footer = urwid.Text([(\"green\", f\"{'Enter:use/eqp':<14s}\"), (\"yellow\",", "+ 1 if self.layer_view > 2: self.layer_view = -1 elif", "override __init__ to use our ButtonLabel instead of urwid.SelectableIcon -", "self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=_title), None) class InventoryFrame(UiFrame): def __init__(self, parent,", "obj.is_consumable: _marker = [\"(\", (obj.color, f\"{obj.marker[0]}\"), \")\"] else: _marker =", "commands\\n\\n\", f\"←→↑↓:move\\n\", f\"shift+←→↑↓:dash\\n\", f\"a:attack\\n\", f\"q:pickup\\n\"] class_action_keys = [k for k,", "= ButtonLabel(\"\") if borders: cols = urwid.Columns([ ('fixed', len(self.button_left), urwid.Text(self.button_left)),", "+= [\"║\"] for y in range(width): _marker_box += [\".\"] _marker_box", "cmd=None, align = \"center\", user_args = None, borders=True, disabled=False): btn", "mind, urwid.Columns([(self.map_width, self.map), (self.menu_width, self.menu)], focus_column=1), header=_header, footer=None, focus_part=\"body\") self.menu_view", "{p.id:\"other\" for i, p in self.mind.master.players.items()}) for line in visible_map]", "def selectable(self): return False def update_body(self, _title): self.active_body = self.bodies[_title]", "1 if self.layer_view > 2: self.layer_view = -1 elif _input", "* value) min_dmg = max(1, base + min_dmg) max_dmg =", "red\",\"white\", \"standout\"), (\"green_line\",\"light green\",\"white\", \"standout\"), (\"yellow_line\",\"yellow\",\"white\", \"standout\"), (\"cyan\",\"light cyan\",\"black\"), (\"cyan_line\",\"light", "mind, box) def on_update(self): player = self.player _equipment = []", "restart(self): pass def focus_next(self): pass def focus_previous(self): pass def update_body(self,", "self.parent.parent.menu_width > 36: _name = c.name[0].upper() + c.name[1:6] _left +=", "dispatch_event(self, 
event_type, *args): self.mind.get_GUI_event(event_type, *args) def register_event(self, event_type, callback): self.mind.register_GUI_event(event_type,", "\", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] _right = [] base =", "-= 1 except: pass class FrameColumns(urwid.Columns): def __init__(self, parent, widget_list,", "x, y = pos _marker_box[(x+X_OFFSET)*(width+2)+y+Y_OFFSET] = (i.color, m) self.box[:] =", "_input == self.mind.key_map[\"inventory-menu\"] and self.menu_view: self.menu.update_body(\"Inventory\") else: self.map.handle_input(_input) def select_item(self,", "FOOTER_HEIGHT) def on_update(self): if self.layer_view == -1: _map = copy.deepcopy(self.player.location.map)", "in player.equipment.items(): _name = t.replace(\"_\", \" \") _name = _name[0].upper()", "[(self.map_width, self.map), (self.menu_width, self.menu)] self.contents[\"body\"] = (urwid.Columns(_columns, focus_column=1), None) @property", "= self.mind.screen_size[1] return (tot_rows - header_height - FOOTER_HEIGHT) def on_update(self):", "[urwid.Columns([(width+2, urwid.Text(_marker_box)), self.selection_data], dividechars=1)] def on_update(self): self.update_header() self.update_body() self.update_footer() class", "self.menu_view: self.menu.update_body(\"Inventory\") else: self.map.handle_input(_input) def select_item(self, _input): if _input.isnumeric() and", "b in set(list(eqp.bonus.keys()) + list(eqp.set_bonus.keys())): val = player.full_eqp_bonus(eqp, b) if", "True self.map = MapFrame(self, mind) self.menu = MenuFrame(self, mind) super().__init__(parent,", "def handle_input(self, _input): if _input == \"tab\": self.menu_view = not", "no_title: self.contents[\"body\"] = (urwid.LineBox(self.active_body), None) else: self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=title),", "if obj: _equipment += [urwid.Text([f\"{_name}: \", (obj.color, f\"{obj.name}\")])] else: _equipment", "f\"{player.name:<12s} {player.game_class.name:<10s}\\nLev:{player.level:<2d} Exp:{player.exp:<4d} {player.location.name}@({x},{y})\\n\" _left = [] for s in", "= [] for i, obj in self.player.inventory.content.items(): if obj: _size", "StatusFrame(UiFrame): def __init__(self, parent, mind): box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(\"\")])) self.box =", "self.bodies[\"Intro\"] super().__init__(parent, mind, self.active_body) def on_update(self): self.active_body.on_update() def handle_input(self, _input):", "def on_update(self): self.active_body.on_update() def selectable(self): return False def update_body(self, _title):", "__init__(self, parent, widget_list, dividechars=0): self.widget_size = len(widget_list) super(FrameColumns, self).__init__(widget_list, dividechars)", "is self.player.inventory.selection: _marker += [(\"line\", _num)] else: _marker += [(\"top\",", "i, p in self.mind.master.players.items()})) if widgets: self.header_widget.body[:] = widgets class", "self.menu_view = True self.update_footer() self.header_widget = self.header.original_widget.box_widget self.footer_content_size = 0", "== 10: _num = \"\\n - \" elif i ==", "exit(self): # self.disconnect() # self.mind.disconnect()#should use dispatch event def restart(self):", "\"\\n = \" if obj and obj is self.player.inventory.selection: _marker", "mind _header = urwid.LineBox(urwid.BoxAdapter(SelectableListBox(urwid.SimpleFocusListWalker([urwid.Text(\"\")])), self.header_height)) self._menu_view = True self.map =", "_input == \"Q\" and self.player.inventory.selection: self.player.actions[\"drop\"].use(self.player, 
obj=self.player.inventory.selection) self.update_footer() elif _input.isnumeric()", "_marker += [(\"line\", _num)] else: _marker += [(\"top\", _num)] btn", "update_body(self): side = urwid.Text(\"║\") width = 8 height = 6", "= {} for eqp in player.equipment_set: for b in set(list(eqp.bonus.keys())", "user_data=None, borders=True, disabled=False): self._label = ButtonLabel(\"\") if borders: cols =", "\"The opportune wizard\\n\\nIntelligence +1\\n Fireball, teleport and ice wall\", \"Thief\":", "y in range(width): _marker_box += [\".\"] _marker_box += [\"║\\n\"] _marker_box", "def set_text(self, label): ''' set_text is invoked by Button.set_label '''", "self._menu_view = True self.map = MapFrame(self, mind) self.menu = MenuFrame(self,", "{\"Warrior\": \"The mighty warrior\\n\\nStrength +1, Hit points +4\\nCharge and parry\",", "self.box[:] = [urwid.Text(_top)] + _equipment class HelpFrame(UiFrame): def __init__(self, parent,", "\"standout\", \"\"), (\"common\",\"white\",\"black\"), (\"common_line\",\"black\",\"white\",\"standout\"), (\"uncommon\",\"dark cyan\",\"black\"), (\"uncommon_line\",\"dark cyan\",\"white\",\"standout\"), (\"rare\",\"yellow\",\"black\"), (\"rare_line\",\"yellow\",\"white\",\"standout\"),", "self.widget_size: self.focus_position -= self.widget_size new_body = [b for b in", "- FOOTER_HEIGHT - 2 @property def menu_view(self): return self._menu_view @menu_view.setter", "self.mind.avatar.uuid in self.mind.master.players: return self.mind.master.players[self.mind.avatar.uuid] else: return None @property def", "mind, urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[\"Warrior\"])])), header=header, focus_part=\"header\") def select_class(self, button): index = min(self.listbox.focus_position,", "[(\"green\", f\"{'Enter:use':<14s}\")] _text += [(\"yellow\", \"Q:drop\")] self.contents[\"footer\"] = (urwid.Text(_text, align=\"center\"),", "t, obj in player.equipment.items(): _name = t.replace(\"_\", \" \") _name", "self.disabled: # urwid.AttrMap(self, \"disabled\") # else: # urwid.AttrMap(self, None, \"line\")", "mind, map_box) self.on_update() @property def visible_range(self): header_height = self.parent.header_height +", "else: self.contents[\"body\"] = (self.active_body, None) class GUI(UiFrame): def __init__(self, parent,", "6 _marker_box = [\"╔\" +\"═\"*width+\"╗\\n\"] for x in range(height): _marker_box", "self.player.location.entities.items() if distance(self.player.position, ent.position) <= 3 and ent.status], key=lambda ent:", "\"Hack\\'n\\'SSH\"), urwid.HalfBlock5x4Font())), self.choices = (\"Warrior\", \"Dwarf\", \"Wizard\", \"Thief\", \"Bard\") self.descriptions", "= [k for k, act in self.mind.key_map.items() if act.startswith(\"class_ability\")] for", "base = player.STR.mod weapon = player.equipment[\"main_hand\"] if not weapon: min_dmg,", "string, can be set and displayed - otherwise, we leave", "(3*self.mind.screen_size[0])//7) return 0 @property def map_width(self): if self.menu_view: return self.mind.screen_size[0]", "player.encumbrance: _right.append((\"yellow\", f\"{player.inventory.encumbrance:>2d}\")) else: _right.append((\"white\", f\"{player.inventory.encumbrance:>2d}\")) _right.append(f\"/{player.encumbrance:<2d}\\n\") _right.append(f\"Speed {player.movement_speed}\\n\") _right.append(f\"Monsterized", "val else: _bonus[b] += val _top = \"\" for b,", "_num)] btn = urwid.Text(_marker, align=\"center\") inv_btns.append((5, urwid.LineBox(btn))) if self.mind.screen_size !=", "\"disabled\") # else: # urwid.AttrMap(self, None, \"line\") def 
selectable(self): return", "f\"{i.description}\\n\"], align=\"center\"), None) def update_footer(self): if not self.player.inventory.selection: self.contents[\"footer\"] =", "self.parent.body_width//3) visible_map = [line[w:w+self.parent.body_width] for line in _map] h =", "_action = self.mind.key_map[_input] self.player.handle_input(_action) class MenuFrame(UiFrame): def __init__(self, parent, mind):", "dividechars, focus_column) def focus_next(self): try: self.focus_position += 1 except: pass", "elif i.is_consumable: _text += [(\"green\", f\"{'Enter:use':<14s}\")] _text += [(\"yellow\", \"Q:drop\")]", "IndexError: pass def focus_previous(self): try: self.focus_position -= 1 except IndexError:", "InventoryFrame(UiFrame): def __init__(self, parent, mind): columns = urwid.Columns([urwid.Text(\"\")]) box =", "k, ent in self.player.location.entities.items() if distance(self.player.position, ent.position) <= 3 and", "p in self.header_list: widgets.append(urwid.AttrMap(urwid.AttrMap(urwid.Text(p.status, wrap=\"clip\"), {self.player.id:\"player\"}), {p.id:\"other\" for i, p", "return urwid.AttrMap(btn, attr_map, focus_map=focus_map) def create_button(label, cmd=None, align = \"center\",", "= self.player.inventory.selection X_OFFSET = 2 Y_OFFSET = 4 for m,", "{player.dmg_reduction:<3d}\\n\") _right.append(f\"Encumb \") if player.inventory.encumbrance == EXTRA_ENCUMBRANCE_MULTI*player.encumbrance: _right.append((\"red\", f\"{player.inventory.encumbrance:>2d}\")) elif", "align if cmd: if user_args: urwid.connect_signal(btn, \"click\", cmd, user_args =", "elif _input == self.mind.key_map[\"equipment-menu\"] and self.menu_view: self.menu.update_body(\"Equipment\") elif _input ==", "pass def focus_next(self): pass def focus_previous(self): pass def update_body(self, title,", "mind, self.active_body) def on_update(self): self.active_body.on_update() def handle_input(self, _input): # print(\"HANDLING\",", "map_with_attr = [urwid.AttrMap(urwid.AttrMap(urwid.Text(line, wrap=\"clip\"), {self.player.id:\"player\"}), {p.id:\"other\" for i, p in", "{player.MP:<2d}\\n\") self.box[:] = [urwid.Text(_top), urwid.Columns([urwid.Text(_left), urwid.Text(_right)], dividechars = 1) ]", "= urwid.LineBox(urwid.BoxAdapter(SelectableListBox(urwid.SimpleFocusListWalker([urwid.Text(\"\")])), self.header_height)) self._menu_view = True self.map = MapFrame(self, mind)", "dividechars=1) else: cols = urwid.Columns([self._label], dividechars=0) super(urwid.Button, self).__init__(cols) self.disabled =", "return None def handle_input(self, _input): pass def on_update(self): pass def", "points +6\\nDemolish and parry\", \"Wizard\": \"The opportune wizard\\n\\nIntelligence +1\\n Fireball,", "mind, urwid.LineBox(self.active_body, title=_title)) def on_update(self): self.active_body.on_update() def selectable(self): return False", "attr_button(c, self.select_class) line.append(btn) walker = urwid.SimpleFocusListWalker(line) urwid.connect_signal(walker, \"modified\", self.update_description) self.listbox", "_input == self.mind.key_map[\"status-menu\"] and self.menu_view: self.menu.update_body(\"Status\") elif _input == self.mind.key_map[\"help-menu\"]", "80X24\", align=\"center\")) self.contents[\"footer\"] = (SelectableColumns(inv_btns, dividechars=0), None) self.footer_content_size = _size", "= 10 elif _input == \"=\": _input = 11 self.player.inventory.selection", "update_footer(self): _size = 0 inv_btns = [] for i, obj", "{self.player.id:\"player\"}), {p.id:\"other\" for i, p in self.mind.master.players.items()})) if widgets: 
self.header_widget.body[:]", "from rpg_game.utils import log, mod, distance from rpg_game.constants import *", "self.footer_content_size = 0 @property def header_height(self): return MIN_HEADER_HEIGHT#max(MIN_HEADER_HEIGHT, self.mind.screen_size[1]//8) @property", "self.mind.get_GUI_event(event_type, *args) def register_event(self, event_type, callback): self.mind.register_GUI_event(event_type, callback) def disconnect(self):", "use dispatch event def restart(self): self.update_body(\"Intro\", no_title=True) def start_game_frame(self): self.bodies[\"Game\"]", "[urwid.Text(_top)] + _equipment class HelpFrame(UiFrame): def __init__(self, parent, mind): self.mind", "red\", \"black\"), (\"fatigued\", \"dark red\", \"white\", \"standout\"), (\"reversed\", \"standout\", \"\"),", "parry\", \"Wizard\": \"The opportune wizard\\n\\nIntelligence +1\\n Fireball, teleport and ice", "@disabled.setter # def disabled(self, value): # if self._disabled == value:", "self.listbox = SelectableListBox(walker) header = urwid.LineBox(urwid.BoxAdapter(self.listbox, len(self.choices)+1)) super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[\"Warrior\"])])),", "1 except: pass def focus_previous(self): try: self.focus_position -= 1 except:", "and ice wall\", \"Thief\": \"The sneaky thief\\n\\nDexterity +1, Intelligence +1,", "(state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] _right = [] base = player.STR.mod", "urwid.AttrMap(self,\"frame\") super().__init__(*args, **kargs) @property def player(self): if self.mind.avatar.uuid in self.mind.master.players:", "hide and trap\", \"Bard\": \"The noisy bard\\n\\nCharisma +1, Dexterity +1,", "urwid.LineBox(btn))) if self.mind.screen_size != (80, 24): inv_btns.append(urwid.Text(\"\\nSET TERMINAL\\nTO 80X24\", align=\"center\"))", "obj in player.equipment.items(): _name = t.replace(\"_\", \" \") _name =", "# @disabled.setter # def disabled(self, value): # if self._disabled ==", "ent.status], key=lambda ent: distance(self.player.position, ent.position)) def update_footer(self): _size = 0", "_right.append(f\"Monsterized {player.MP:<2d}\\n\") self.box[:] = [urwid.Text(_top), urwid.Columns([urwid.Text(_left), urwid.Text(_right)], dividechars = 1)", "= c.name[0].upper() + c.name[1:6] _left += [f\"{_name:<6} \", (state, f\"{c.value:>2d}\"),", "__init__(self, parent, mind): self.mind = mind map_commands = [\"Map commands\\n\\n\",", "f\"{player.inventory.encumbrance:>2d}\")) elif player.inventory.encumbrance > player.encumbrance: _right.append((\"yellow\", f\"{player.inventory.encumbrance:>2d}\")) else: _right.append((\"white\", f\"{player.inventory.encumbrance:>2d}\"))", "= \"\" for b, val in _bonus.items(): if b ==", "def focus_previous(self): try: self.focus_position -= 1 except IndexError: pass class", "elif _input.isnumeric() or _input in (\"-\", \"=\"): self.select_item(_input) self.update_footer() elif", "(\"other\", \"light blue\", \"black\"), (\"monster\", \"dark red\", \"black\"), (\"fatigued\", \"dark", "else: cols = urwid.Columns([self._label], dividechars=0) super(urwid.Button, self).__init__(cols) self.disabled = disabled", "self.bodies[\"Game\"] = GameFrame(self, self.mind) self.update_body(\"Game\", no_title=True) class IntroFrame(UiFrame): def __init__(self,", "return False def update_body(self, _title): self.active_body = self.bodies[_title] self.contents[\"body\"] =", "[(\"red\", f\"{'Cannot equip':<14s}\")] elif not i.is_equipped: _text += [(\"green\", f\"{'Enter:equip':<14s}\")]", "ent: distance(self.player.position, 
ent.position)) def update_footer(self): _size = 0 inv_btns =", "self.parent.body_height//2) if h+self.parent.body_height >= len(visible_map): visible_map = visible_map[len(visible_map)-self.parent.body_height:] else: visible_map", "= None, borders=True, disabled=False): btn = create_button(label, cmd=cmd, align =", "\"\"), (\"common\",\"white\",\"black\"), (\"common_line\",\"black\",\"white\",\"standout\"), (\"uncommon\",\"dark cyan\",\"black\"), (\"uncommon_line\",\"dark cyan\",\"white\",\"standout\"), (\"rare\",\"yellow\",\"black\"), (\"rare_line\",\"yellow\",\"white\",\"standout\"), (\"unique\",\"light", "(self.menu_width, self.menu)], focus_column=1), header=_header, footer=None, focus_part=\"body\") self.menu_view = True self.update_footer()", "_bonus: _bonus[b] = val else: _bonus[b] += val _top =", "= _size def on_update(self): self.update_header() if self.footer_content_size != len(self.player.inventory.all): self.update_footer()", "short dwarf\\n\\nStrength +1, Constitution +1, Hit points +6\\nDemolish and parry\",", "\"standout\"), (\"yellow_line\",\"yellow\",\"white\", \"standout\"), (\"cyan\",\"light cyan\",\"black\"), (\"cyan_line\",\"light cyan\",\"white\", \"standout\"), (\"name\",\"white\",\"black\"), ]", "for i, act in enumerate(self.player.class_actions): k = class_action_keys[i] map_commands.append(f\"{k}:{self.player.class_actions[act].description.lower()}\\n\") menu_commands", "self.contents[\"body\"] = (self.active_body, None) class GUI(UiFrame): def __init__(self, parent, mind):", "= [\"[\", (obj.color, f\"{obj.marker[0]}\"), \"]\"] elif obj.is_equipment and not obj.is_equipped:", "from rpg_game.constants import * from urwid import raw_display SIZE =", "self.header_list: widgets.append(urwid.AttrMap(urwid.AttrMap(urwid.Text(p.status, wrap=\"clip\"), {self.player.id:\"player\"}), {p.id:\"other\" for i, p in self.mind.master.players.items()}))", "= [f\" {obj.marker[0]} \"] else: _marker = [f\" \"] if", "(self.default_footer, None) else: i = self.player.inventory.selection _text = [] if", "= (urwid.LineBox(self.active_body, title=_title), None) class InventoryFrame(UiFrame): def __init__(self, parent, mind):", "_num = f\"\\n {i+1} \" elif i == 9: _num", "self.player.inventory.selection _text = [] _text += [i.eq_description, f\"\\nEncumbrance:{i.encumbrance}\\n\"] return urwid.Text(_text)", "(tot_rows - header_height - FOOTER_HEIGHT) def on_update(self): if self.layer_view ==", "_title = _frames[idx] self.active_body = self.bodies[_title] super().__init__(parent, mind, urwid.LineBox(self.active_body, title=_title))", "update_body(self, _title): self.active_body = self.bodies[_title] self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=_title), None)", "idx = -1 _title = _frames[idx] self.active_body = self.bodies[_title] super().__init__(parent,", "else: i = self.player.inventory.selection self.contents[\"header\"] = (urwid.Text([(i.color, f\"{i.name}\\n\"), f\"{i.description}\\n\"], align=\"center\"),", "+1, Hit points +2\\nSneak attack, hide and trap\", \"Bard\": \"The", "i == 9: _num = \"\\n 0 \" elif i", "variable width - any string, including an empty string, can", "disabled if on_press: urwid.connect_signal(self, 'click', on_press, user_data) self.set_label(label) self.lllavel =", "t.replace(\"_\", \" \") _name = _name[0].upper() + _name[1:] if obj:", "!= (80, 24): self.update_footer() self.map.on_update() if self.menu_view: self.menu.on_update() def handle_input(self,", "user_args = None, borders=True, disabled=False): btn = MyButton(label, 
borders=borders, disabled=disabled)", "= f\"{player.name:<12s} {player.game_class.name:<10s}\\nLev:{player.level:<2d} Exp:{player.exp:<4d} {player.location.name}@({x},{y})\\n\" _left = [] for s", "widgets.append(urwid.AttrMap(urwid.AttrMap(urwid.Text(p.status, wrap=\"clip\"), {self.player.id:\"player\"}), {p.id:\"other\" for i, p in self.mind.master.players.items()})) if", "s in CHARACTERISTICS: c = getattr(player, s) state = [\"normal\",", "return self.mind.connections[self.mind.avatar.uuid] else: return None def handle_input(self, _input): pass def", "< 9: _num = f\"\\n {i+1} \" elif i ==", "self.menu.on_update() def handle_input(self, _input): if _input == \"tab\": self.menu_view =", "on_update(self): player = self.player x, y, z = player.position _top", "> 0)] if self.parent.parent.menu_width > 40: _name = c.name[0].upper() +", "== \"Q\" and self.player.inventory.selection: self.player.actions[\"drop\"].use(self.player, obj=self.player.inventory.selection) self.update_footer() elif _input.isnumeric() or", "self.menu.update_body(\"Status\") elif _input == self.mind.key_map[\"help-menu\"] and self.menu_view: self.menu.update_body(\"Help\") elif _input", "_input = 10 elif _input == \"=\": _input = 11", "self.box[:] = [urwid.Text(_top), urwid.Columns([urwid.Text(_left), urwid.Text(_right)], dividechars = 1) ] class", "in self.mind.key_map.items() if act.startswith(\"class_ability\")] for i, act in enumerate(self.player.class_actions): k", "if self.layer_view == -1: _map = copy.deepcopy(self.player.location.map) else: _map =", "def exit(self): # self.disconnect() # self.mind.disconnect()#should use dispatch event def", "self.parent.update_body(new_body) except: pass class ButtonLabel(urwid.SelectableIcon): def set_text(self, label): ''' set_text", "9: _num = f\"\\n {i+1} \" elif i == 9:", "+ list(eqp.set_bonus.keys())): val = player.full_eqp_bonus(eqp, b) if b not in", "f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] _right = [] base = player.STR.mod weapon", "return 0 @property def map_width(self): if self.menu_view: return self.mind.screen_size[0] -", "= (\"Inventory\", \"Status\", \"Equipment\", \"Help\") self.bodies = {b : globals()[f\"{b}Frame\"](self,", "self.contents[\"body\"] = (urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])), None) class GameFrame(UiFrame): def __init__(self, parent, mind):", "- self.header_height - FOOTER_HEIGHT - 2 @property def menu_view(self): return", "focus_map = \"line\", align = \"center\", user_args = None, borders=True,", "self.focus_position < 0: self.focus_position += self.widget_size new_body = [b for", "by Button.set_label ''' self.__super.set_text(label) self._cursor_position = len(label) + 1 class", "cyan\",\"white\",\"standout\"), (\"rare\",\"yellow\",\"black\"), (\"rare_line\",\"yellow\",\"white\",\"standout\"), (\"unique\",\"light magenta\",\"black\"), (\"unique_line\",\"light magenta\",\"white\",\"standout\"), (\"set\",\"light green\",\"black\"), (\"set_line\",\"light", "24): inv_btns.append(urwid.Text(\"\\nSET TERMINAL\\nTO 80X24\", align=\"center\")) self.contents[\"footer\"] = (SelectableColumns(inv_btns, dividechars=0), None)", "[] for t, obj in player.equipment.items(): _name = t.replace(\"_\", \"", "body_width(self): return self.mind.screen_size[0] @property def body_height(self): return self.mind.screen_size[1] - self.header_height", "\" if obj and obj is self.player.inventory.selection: _marker += [(\"line\",", "_num = \"\\n = \" if obj and obj is", "\"enter\" and self.player.inventory.selection: 
self.player.use_quick_item(self.player.inventory.selection) self.update_footer() elif _input == \"Q\" and", "_name[0].upper() + _name[1:] if obj: _equipment += [urwid.Text([f\"{_name}: \", (obj.color,", "Fireball, teleport and ice wall\", \"Thief\": \"The sneaky thief\\n\\nDexterity +1,", "Intelligence +1, Hit points +2\\nSing and summon\"} line = []", "= [urwid.Text(_top), urwid.Columns([urwid.Text(_left), urwid.Text(_right)], dividechars = 1) ] class EquipmentFrame(UiFrame):", "urwid.Text(self.button_left)), self._label, ('fixed', len(self.button_right), urwid.Text(self.button_right))], dividechars=1) else: cols = urwid.Columns([self._label],", "value: # return # if self.disabled: # urwid.AttrMap(self, \"disabled\") #", "urwid.HalfBlock5x4Font())), self.choices = (\"Warrior\", \"Dwarf\", \"Wizard\", \"Thief\", \"Bard\") self.descriptions =", "return self.mind.screen_size[0] - self.menu_width return self.mind.screen_size[0] @property def body_width(self): return", "self.box[:] = [urwid.Columns([(width+2, urwid.Text(_marker_box)), self.selection_data], dividechars=1)] def on_update(self): self.update_header() self.update_body()", "any string, including an empty string, can be set and", "\"The short dwarf\\n\\nStrength +1, Constitution +1, Hit points +6\\nDemolish and", "Exp:{player.exp:<4d} {player.location.name}@({x},{y})\\n\" _left = [] for s in CHARACTERISTICS: c", "self.menu.update_body(\"Equipment\") elif _input == self.mind.key_map[\"inventory-menu\"] and self.menu_view: self.menu.update_body(\"Inventory\") else: self.map.handle_input(_input)", "[f\"{_name:<6} \", (state, f\"{c.value:>2d}\"), f\" ({c.mod:<+2d})\\n\"] else: _left += [f\"{s:<3}", "side = urwid.Text(\"║\") width = 8 height = 6 _marker_box", "i.requisites(self.player): _text += [(\"red\", f\"{'Cannot equip':<14s}\")] elif not i.is_equipped: _text", "elif _input in self.mind.key_map: _action = self.mind.key_map[_input] self.player.handle_input(_action) class MenuFrame(UiFrame):", "dividechars=0): self.widget_size = len(widget_list) super(FrameColumns, self).__init__(widget_list, dividechars) self.parent = parent", "= player.equipment[\"main_hand\"] if not weapon: min_dmg, max_dmg = (1, 4)", "self.focus_position += 1 if self.focus_position >= self.widget_size: self.focus_position -= self.widget_size", "opportune wizard\\n\\nIntelligence +1\\n Fireball, teleport and ice wall\", \"Thief\": \"The", "_map] h = max(0, x - self.parent.body_height//2) if h+self.parent.body_height >=", "_text = [] _text += [i.eq_description, f\"\\nEncumbrance:{i.encumbrance}\\n\"] return urwid.Text(_text) def", "= (urwid.LineBox(self.active_body), None) else: self.contents[\"body\"] = (urwid.LineBox(self.active_body, title=title), None) else:", "[(\"green\", f\"{'Enter:equip':<14s}\")] elif i.is_equipped: _text += [(\"green\", f\"{'Enter:unequip':<14s}\")] elif i.is_consumable:", "[\"normal\", \"positive\", \"negative\"][-int(c.temp_bonus < 0) + int(c.temp_bonus > 0)] if", "4 PALETTE = [ (\"line\", 'black', 'white', \"standout\"), (\"top\",\"white\",\"black\"), (\"frame\",\"white\",\"white\"),", "self.focus_position += self.widget_size new_body = [b for b in self.parent.bodies][self.focus_position]", "dividechars) self.parent = parent def focus_next(self): try: self.focus_position += 1", "= attr_button(c, self.select_class) line.append(btn) walker = urwid.SimpleFocusListWalker(line) urwid.connect_signal(walker, \"modified\", self.update_description)", "* from urwid import raw_display SIZE = lambda scr=raw_display.Screen(): scr.get_cols_rows()", 
"f\"ctrl+e:equipment\\n\"] columns = urwid.Columns([urwid.Text(map_commands, wrap=\"clip\"), urwid.Text(menu_commands, wrap=\"clip\")], dividechars = 1)", "= self.bodies[\"Intro\"] super().__init__(parent, mind, self.active_body) def on_update(self): self.active_body.on_update() def handle_input(self,", "title=_title), None) class InventoryFrame(UiFrame): def __init__(self, parent, mind): columns =", "__init__(self, widget_list, focus_column=None, dividechars=0): super().__init__(widget_list, dividechars, focus_column) def focus_next(self): try:", "set(list(eqp.bonus.keys()) + list(eqp.set_bonus.keys())): val = player.full_eqp_bonus(eqp, b) if b not", "magenta\",\"white\",\"standout\"), (\"set\",\"light green\",\"black\"), (\"set_line\",\"light green\",\"white\",\"standout\"), (\"normal\",\"white\",\"black\"), (\"positive\",\"light green\",\"black\"), (\"negative\",\"dark red\",\"black\")," ]
[ "\"array\", \"items\": {\"type\": \"number\"}, } _combined_schemas = { \"$schema\": \"http://json-schema.org/draft-04/schema#\",", "\"type\": \"array\", \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}}, }, ],", "\"type\": \"boolean\", \"default\": True, \"description\": \"whether to calculate the intercept", "{\"type\": \"number\"}}, }, ], \"description\": \"Samples.\", } }, } _output_predict_schema", "\"fit_intercept\": { \"type\": \"boolean\", \"default\": True, \"description\": \"whether to calculate", "to calculate the intercept for this model\", }, \"normalize\": {", "} }, } _output_predict_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Returns", "from sklearn.linear_model import LinearRegression as Op from lale.docstrings import set_docstrings", "\"type\": \"object\", \"required\": [\"X\", \"y\"], \"properties\": { \"X\": { \"anyOf\":", "n_jobs > only provide speedup for n_targets > 1 and", "else, it may be overwritten.\", }, \"n_jobs\": { \"anyOf\": [{\"type\":", "\"items\": {\"type\": \"number\"}}, }, ], \"description\": \"Training data\", }, \"y\":", "def __init__(self, **hyperparams): self._hyperparams = hyperparams self._wrapped_model = Op(**self._hyperparams) def", "intercept for this model\", }, \"normalize\": { \"type\": \"boolean\", \"default\":", "LinearRegression Ordinary least squares Linear Regression.\", \"allOf\": [ { \"type\":", "\"sklearn.linear_model\", \"type\": \"object\", \"tags\": {\"pre\": [], \"op\": [\"estimator\"], \"post\": []},", "n_features)\", }, { \"type\": \"array\", \"items\": {\"type\": \"array\", \"items\": {\"type\":", "} _input_predict_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Predict using the", "}, }, { \"XXX TODO XXX\": \"Parameter: n_jobs > only", "\"default\": True, \"description\": \"If True, X will be copied; else,", "is set to False\", }, \"copy_X\": { \"type\": \"boolean\", \"default\":", "ignored when ``fit_intercept`` is set to False\", }, \"copy_X\": {", "\"relevantToOptimizer\": [\"fit_intercept\", \"normalize\", \"copy_X\"], \"additionalProperties\": False, \"properties\": { \"fit_intercept\": {", "{\"laleType\": \"Any\", \"XXX TODO XXX\": \"item type\"}, \"XXX TODO XXX\":", "\"required\": [\"X\", \"y\"], \"properties\": { \"X\": { \"anyOf\": [ {", "\"description\": \"Samples.\", } }, } _output_predict_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\",", "\"anyOf\": [{\"type\": \"integer\"}, {\"enum\": [None]}], \"default\": 1, \"description\": \"The number", "and sufficient large problems\" }, ], } _input_fit_schema = {", "TODO XXX\": \"array-like or sparse matrix, shape (n_samples, n_features)\", },", "sklearn.linear_model import LinearRegression as Op from lale.docstrings import set_docstrings from", "schema for expected data and hyperparameters.\", \"documentation_url\": \"https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression\", \"import_from\": \"sklearn.linear_model\",", "linear model\", \"type\": \"object\", \"required\": [\"X\"], \"properties\": { \"X\": {", "for expected data and hyperparameters.\", \"documentation_url\": \"https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression\", \"import_from\": \"sklearn.linear_model\", \"type\":", "\"properties\": { \"fit_intercept\": { \"type\": \"boolean\", \"default\": True, \"description\": \"whether", "\"items\": 
{\"type\": \"array\", \"items\": {\"type\": \"number\"}}, }, ], \"description\": \"Training", "Linear Regression.\", \"allOf\": [ { \"type\": \"object\", \"required\": [\"fit_intercept\", \"normalize\",", "[], \"op\": [\"estimator\"], \"post\": []}, \"properties\": { \"hyperparams\": _hyperparams_schema, \"input_fit\":", "\"whether to calculate the intercept for this model\", }, \"normalize\":", "\"properties\": { \"X\": { \"anyOf\": [ { \"type\": \"array\", \"items\":", "\"y\"], \"properties\": { \"X\": { \"anyOf\": [ { \"type\": \"array\",", "\"http://json-schema.org/draft-04/schema#\", \"description\": \"Combined schema for expected data and hyperparameters.\", \"documentation_url\":", "hyperparameters.\", \"documentation_url\": \"https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression\", \"import_from\": \"sklearn.linear_model\", \"type\": \"object\", \"tags\": {\"pre\": [],", "to False\", }, \"copy_X\": { \"type\": \"boolean\", \"default\": True, \"description\":", "} _output_predict_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Returns predicted values.\",", "is ignored when ``fit_intercept`` is set to False\", }, \"copy_X\":", "{ \"anyOf\": [{\"type\": \"integer\"}, {\"enum\": [None]}], \"default\": 1, \"description\": \"The", "}, { \"XXX TODO XXX\": \"Parameter: n_jobs > only provide", "data and hyperparameters.\", \"documentation_url\": \"https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression\", \"import_from\": \"sklearn.linear_model\", \"type\": \"object\", \"tags\":", "= { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Combined schema for expected data", "each sample \", }, }, } _input_predict_schema = { \"$schema\":", "False\", }, \"copy_X\": { \"type\": \"boolean\", \"default\": True, \"description\": \"If", "\", }, }, } _input_predict_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\":", "this model\", }, \"normalize\": { \"type\": \"boolean\", \"default\": False, \"description\":", "{\"type\": \"number\"}, \"description\": \"Individual weights for each sample \", },", "fit(self, X, y=None): if y is not None: self._wrapped_model.fit(X, y)", "set_docstrings from lale.operators import make_operator class LinearRegressionImpl: def __init__(self, **hyperparams):", "\"Predict using the linear model\", \"type\": \"object\", \"required\": [\"X\"], \"properties\":", "\"op\": [\"estimator\"], \"post\": []}, \"properties\": { \"hyperparams\": _hyperparams_schema, \"input_fit\": _input_fit_schema,", "parameter is ignored when ``fit_intercept`` is set to False\", },", "{\"type\": \"number\"}}, \"description\": \"Target values\", }, \"sample_weight\": { \"type\": \"array\",", "[{\"type\": \"integer\"}, {\"enum\": [None]}], \"default\": 1, \"description\": \"The number of", "{\"type\": \"array\", \"items\": {\"type\": \"number\"}}, }, ], \"description\": \"Samples.\", }", "for n_targets > 1 and sufficient large problems\" }, ],", "\"items\": {\"type\": \"number\"}}, }, ], \"description\": \"Samples.\", } }, }", "\"Fit linear model.\", \"type\": \"object\", \"required\": [\"X\", \"y\"], \"properties\": {", "__init__(self, **hyperparams): self._hyperparams = hyperparams self._wrapped_model = Op(**self._hyperparams) def fit(self,", "\"type\": \"array\", \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}}, \"description\": \"Target", 
"\"array\", \"items\": {\"type\": \"number\"}, \"description\": \"Individual weights for each sample", "\"Combined schema for expected data and hyperparameters.\", \"documentation_url\": \"https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression\", \"import_from\":", "X will be copied; else, it may be overwritten.\", },", "for each sample \", }, }, } _input_predict_schema = {", "\"hyperparams\": _hyperparams_schema, \"input_fit\": _input_fit_schema, \"input_predict\": _input_predict_schema, \"output_predict\": _output_predict_schema, }, }", "\"array\", \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}}, }, ], \"description\":", "self def predict(self, X): return self._wrapped_model.predict(X) _hyperparams_schema = { \"$schema\":", "as Op from lale.docstrings import set_docstrings from lale.operators import make_operator", "not None: self._wrapped_model.fit(X, y) else: self._wrapped_model.fit(X) return self def predict(self,", "[\"fit_intercept\", \"normalize\", \"copy_X\"], \"additionalProperties\": False, \"properties\": { \"fit_intercept\": { \"type\":", "True, \"description\": \"If True, X will be copied; else, it", "\"documentation_url\": \"https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression\", \"import_from\": \"sklearn.linear_model\", \"type\": \"object\", \"tags\": {\"pre\": [], \"op\":", "TODO XXX\": \"array_like or sparse matrix, shape (n_samples, n_features)\", },", "``fit_intercept`` is set to False\", }, \"copy_X\": { \"type\": \"boolean\",", "\"required\": [\"X\"], \"properties\": { \"X\": { \"anyOf\": [ { \"type\":", "model.\", \"type\": \"object\", \"required\": [\"X\", \"y\"], \"properties\": { \"X\": {", "\"type\": \"boolean\", \"default\": False, \"description\": \"This parameter is ignored when", "problems\" }, ], } _input_fit_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\":", "\"items\": {\"laleType\": \"Any\", \"XXX TODO XXX\": \"item type\"}, \"XXX TODO", "_hyperparams_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"inherited docstring for LinearRegression", "\"n_jobs\"], \"relevantToOptimizer\": [\"fit_intercept\", \"normalize\", \"copy_X\"], \"additionalProperties\": False, \"properties\": { \"fit_intercept\":", "\"normalize\": { \"type\": \"boolean\", \"default\": False, \"description\": \"This parameter is", "type\"}, \"XXX TODO XXX\": \"array-like or sparse matrix, shape (n_samples,", "\"object\", \"required\": [\"X\", \"y\"], \"properties\": { \"X\": { \"anyOf\": [", "predict(self, X): return self._wrapped_model.predict(X) _hyperparams_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\":", "and hyperparameters.\", \"documentation_url\": \"https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression\", \"import_from\": \"sklearn.linear_model\", \"type\": \"object\", \"tags\": {\"pre\":", "will be copied; else, it may be overwritten.\", }, \"n_jobs\":", "Op from lale.docstrings import set_docstrings from lale.operators import make_operator class", "= Op(**self._hyperparams) def fit(self, X, y=None): if y is not", "\"description\": \"Combined schema for expected data and hyperparameters.\", \"documentation_url\": \"https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression\",", 
"= { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Predict using the linear model\",", "import inf, nan from sklearn.linear_model import LinearRegression as Op from", "of jobs to use for the computation\", }, }, },", "_input_fit_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Fit linear model.\", \"type\":", "X): return self._wrapped_model.predict(X) _hyperparams_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"inherited", "import LinearRegression as Op from lale.docstrings import set_docstrings from lale.operators", "sparse matrix, shape (n_samples, n_features)\", }, { \"type\": \"array\", \"items\":", "{ \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Fit linear model.\", \"type\": \"object\", \"required\":", "expected data and hyperparameters.\", \"documentation_url\": \"https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression\", \"import_from\": \"sklearn.linear_model\", \"type\": \"object\",", "> only provide speedup for n_targets > 1 and sufficient", "[\"X\", \"y\"], \"properties\": { \"X\": { \"anyOf\": [ { \"type\":", "\"type\": \"boolean\", \"default\": True, \"description\": \"If True, X will be", "or sparse matrix, shape (n_samples, n_features)\", }, { \"type\": \"array\",", "\"default\": True, \"description\": \"whether to calculate the intercept for this", "self._hyperparams = hyperparams self._wrapped_model = Op(**self._hyperparams) def fit(self, X, y=None):", "{ \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Combined schema for expected data and", "\"Parameter: n_jobs > only provide speedup for n_targets > 1", "lale.docstrings import set_docstrings from lale.operators import make_operator class LinearRegressionImpl: def", "], } _input_fit_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Fit linear", "{ \"type\": \"boolean\", \"default\": True, \"description\": \"whether to calculate the", "import set_docstrings from lale.operators import make_operator class LinearRegressionImpl: def __init__(self,", "\"boolean\", \"default\": True, \"description\": \"If True, X will be copied;", "[]}, \"properties\": { \"hyperparams\": _hyperparams_schema, \"input_fit\": _input_fit_schema, \"input_predict\": _input_predict_schema, \"output_predict\":", "}, \"copy_X\": { \"type\": \"boolean\", \"default\": True, \"description\": \"If True,", "], \"description\": \"Samples.\", } }, } _output_predict_schema = { \"$schema\":", "[ { \"type\": \"array\", \"items\": {\"laleType\": \"Any\", \"XXX TODO XXX\":", "y is not None: self._wrapped_model.fit(X, y) else: self._wrapped_model.fit(X) return self", "inf, nan from sklearn.linear_model import LinearRegression as Op from lale.docstrings", "\"description\": \"Target values\", }, \"sample_weight\": { \"type\": \"array\", \"items\": {\"type\":", "overwritten.\", }, \"n_jobs\": { \"anyOf\": [{\"type\": \"integer\"}, {\"enum\": [None]}], \"default\":", "_hyperparams_schema, \"input_fit\": _input_fit_schema, \"input_predict\": _input_predict_schema, \"output_predict\": _output_predict_schema, }, } set_docstrings(LinearRegressionImpl,", "(n_samples, n_features)\", }, { \"type\": \"array\", \"items\": {\"type\": \"array\", \"items\":", "using the linear model\", \"type\": \"object\", \"required\": [\"X\"], \"properties\": {", "}, \"sample_weight\": { \"type\": \"array\", \"items\": 
{\"type\": \"number\"}, \"description\": \"Individual", "\"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"inherited docstring for LinearRegression Ordinary least squares", "\"default\": 1, \"description\": \"The number of jobs to use for", "\"array-like or sparse matrix, shape (n_samples, n_features)\", }, { \"type\":", "\"import_from\": \"sklearn.linear_model\", \"type\": \"object\", \"tags\": {\"pre\": [], \"op\": [\"estimator\"], \"post\":", "\"http://json-schema.org/draft-04/schema#\", \"description\": \"Returns predicted values.\", \"type\": \"array\", \"items\": {\"type\": \"number\"},", "matrix, shape (n_samples, n_features)\", }, { \"type\": \"array\", \"items\": {\"type\":", "\"item type\"}, \"XXX TODO XXX\": \"array_like or sparse matrix, shape", "be overwritten.\", }, \"n_jobs\": { \"anyOf\": [{\"type\": \"integer\"}, {\"enum\": [None]}],", "\"description\": \"The number of jobs to use for the computation\",", "\"inherited docstring for LinearRegression Ordinary least squares Linear Regression.\", \"allOf\":", "data\", }, \"y\": { \"type\": \"array\", \"items\": {\"type\": \"array\", \"items\":", "TODO XXX\": \"Parameter: n_jobs > only provide speedup for n_targets", "XXX\": \"array_like or sparse matrix, shape (n_samples, n_features)\", }, {", "speedup for n_targets > 1 and sufficient large problems\" },", "\"copy_X\"], \"additionalProperties\": False, \"properties\": { \"fit_intercept\": { \"type\": \"boolean\", \"default\":", "\"boolean\", \"default\": True, \"description\": \"whether to calculate the intercept for", "hyperparams self._wrapped_model = Op(**self._hyperparams) def fit(self, X, y=None): if y", "{ \"type\": \"array\", \"items\": {\"laleType\": \"Any\", \"XXX TODO XXX\": \"item", "\"X\": { \"anyOf\": [ { \"type\": \"array\", \"items\": {\"laleType\": \"Any\",", "Op(**self._hyperparams) def fit(self, X, y=None): if y is not None:", "if y is not None: self._wrapped_model.fit(X, y) else: self._wrapped_model.fit(X) return", "self._wrapped_model.fit(X, y) else: self._wrapped_model.fit(X) return self def predict(self, X): return", "}, ], \"description\": \"Training data\", }, \"y\": { \"type\": \"array\",", "\"type\": \"object\", \"required\": [\"X\"], \"properties\": { \"X\": { \"anyOf\": [", "\"default\": False, \"description\": \"This parameter is ignored when ``fit_intercept`` is", "}, }, } _input_predict_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Predict", "\"Training data\", }, \"y\": { \"type\": \"array\", \"items\": {\"type\": \"array\",", "}, } _output_predict_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Returns predicted", "TODO XXX\": \"item type\"}, \"XXX TODO XXX\": \"array_like or sparse", "}, \"normalize\": { \"type\": \"boolean\", \"default\": False, \"description\": \"This parameter", "{ \"type\": \"array\", \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}}, \"description\":", "{ \"type\": \"boolean\", \"default\": True, \"description\": \"If True, X will", "{ \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Predict using the linear model\", \"type\":", "LinearRegression as Op from lale.docstrings import set_docstrings from lale.operators import", "\"XXX TODO XXX\": \"item type\"}, \"XXX TODO XXX\": \"array_like or", "self._wrapped_model = Op(**self._hyperparams) def fit(self, X, y=None): if y is", "predicted values.\", \"type\": \"array\", \"items\": {\"type\": \"number\"}, } _combined_schemas 
=", "\"sample_weight\": { \"type\": \"array\", \"items\": {\"type\": \"number\"}, \"description\": \"Individual weights", "\"http://json-schema.org/draft-04/schema#\", \"description\": \"inherited docstring for LinearRegression Ordinary least squares Linear", "self._wrapped_model.fit(X) return self def predict(self, X): return self._wrapped_model.predict(X) _hyperparams_schema =", "\"boolean\", \"default\": False, \"description\": \"This parameter is ignored when ``fit_intercept``", "[ { \"type\": \"object\", \"required\": [\"fit_intercept\", \"normalize\", \"copy_X\", \"n_jobs\"], \"relevantToOptimizer\":", "> 1 and sufficient large problems\" }, ], } _input_fit_schema", "\"normalize\", \"copy_X\"], \"additionalProperties\": False, \"properties\": { \"fit_intercept\": { \"type\": \"boolean\",", "1 and sufficient large problems\" }, ], } _input_fit_schema =", "\"required\": [\"fit_intercept\", \"normalize\", \"copy_X\", \"n_jobs\"], \"relevantToOptimizer\": [\"fit_intercept\", \"normalize\", \"copy_X\"], \"additionalProperties\":", "\"type\": \"array\", \"items\": {\"type\": \"number\"}, } _combined_schemas = { \"$schema\":", "\"This parameter is ignored when ``fit_intercept`` is set to False\",", "\"type\": \"object\", \"tags\": {\"pre\": [], \"op\": [\"estimator\"], \"post\": []}, \"properties\":", "\"description\": \"Training data\", }, \"y\": { \"type\": \"array\", \"items\": {\"type\":", "weights for each sample \", }, }, } _input_predict_schema =", "\"items\": {\"type\": \"number\"}, } _combined_schemas = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\":", "\"http://json-schema.org/draft-04/schema#\", \"description\": \"Predict using the linear model\", \"type\": \"object\", \"required\":", "for the computation\", }, }, }, { \"XXX TODO XXX\":", "TODO XXX\": \"item type\"}, \"XXX TODO XXX\": \"array-like or sparse", "\"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}}, \"description\": \"Target values\", },", "\"items\": {\"type\": \"number\"}, \"description\": \"Individual weights for each sample \",", "{ \"fit_intercept\": { \"type\": \"boolean\", \"default\": True, \"description\": \"whether to", "\"Any\", \"XXX TODO XXX\": \"item type\"}, \"XXX TODO XXX\": \"array_like", "}, ], } _input_fit_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Fit", "\"Samples.\", } }, } _output_predict_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\":", "True, X will be copied; else, it may be overwritten.\",", "to use for the computation\", }, }, }, { \"XXX", "\"Returns predicted values.\", \"type\": \"array\", \"items\": {\"type\": \"number\"}, } _combined_schemas", "sample \", }, }, } _input_predict_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\",", "\"array\", \"items\": {\"type\": \"number\"}}, }, ], \"description\": \"Samples.\", } },", "\"array\", \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}}, \"description\": \"Target values\",", "None: self._wrapped_model.fit(X, y) else: self._wrapped_model.fit(X) return self def predict(self, X):", "the computation\", }, }, }, { \"XXX TODO XXX\": \"Parameter:", "} _input_fit_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Fit linear model.\",", "\"additionalProperties\": False, \"properties\": { \"fit_intercept\": { \"type\": \"boolean\", \"default\": True,", "\"description\": \"Individual weights for each sample \", }, }, }", "_input_fit_schema, 
\"input_predict\": _input_predict_schema, \"output_predict\": _output_predict_schema, }, } set_docstrings(LinearRegressionImpl, _combined_schemas) LinearRegression", "}, } _input_predict_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Predict using", "{\"type\": \"number\"}}, }, ], \"description\": \"Training data\", }, \"y\": {", "[\"X\"], \"properties\": { \"X\": { \"anyOf\": [ { \"type\": \"array\",", "{\"type\": \"array\", \"items\": {\"type\": \"number\"}}, }, ], \"description\": \"Training data\",", "\"type\": \"array\", \"items\": {\"laleType\": \"Any\", \"XXX TODO XXX\": \"item type\"},", "{ \"hyperparams\": _hyperparams_schema, \"input_fit\": _input_fit_schema, \"input_predict\": _input_predict_schema, \"output_predict\": _output_predict_schema, },", "only provide speedup for n_targets > 1 and sufficient large", "XXX\": \"array-like or sparse matrix, shape (n_samples, n_features)\", }, {", "\"Individual weights for each sample \", }, }, } _input_predict_schema", "\"description\": \"inherited docstring for LinearRegression Ordinary least squares Linear Regression.\",", "shape (n_samples, n_features)\", }, { \"type\": \"array\", \"items\": {\"type\": \"array\",", "it may be overwritten.\", }, \"n_jobs\": { \"anyOf\": [{\"type\": \"integer\"},", "\"tags\": {\"pre\": [], \"op\": [\"estimator\"], \"post\": []}, \"properties\": { \"hyperparams\":", "\"properties\": { \"hyperparams\": _hyperparams_schema, \"input_fit\": _input_fit_schema, \"input_predict\": _input_predict_schema, \"output_predict\": _output_predict_schema,", "\"array\", \"items\": {\"laleType\": \"Any\", \"XXX TODO XXX\": \"item type\"}, \"XXX", "], \"description\": \"Training data\", }, \"y\": { \"type\": \"array\", \"items\":", "{ \"type\": \"array\", \"items\": {\"type\": \"number\"}, \"description\": \"Individual weights for", "use for the computation\", }, }, }, { \"XXX TODO", "\"XXX TODO XXX\": \"item type\"}, \"XXX TODO XXX\": \"array-like or", "\"The number of jobs to use for the computation\", },", "\"Target values\", }, \"sample_weight\": { \"type\": \"array\", \"items\": {\"type\": \"number\"},", "for this model\", }, \"normalize\": { \"type\": \"boolean\", \"default\": False,", "class LinearRegressionImpl: def __init__(self, **hyperparams): self._hyperparams = hyperparams self._wrapped_model =", "{ \"X\": { \"anyOf\": [ { \"type\": \"array\", \"items\": {\"laleType\":", "\"y\": { \"type\": \"array\", \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},", "\"description\": \"Returns predicted values.\", \"type\": \"array\", \"items\": {\"type\": \"number\"}, }", "copied; else, it may be overwritten.\", }, \"n_jobs\": { \"anyOf\":", "\"type\": \"object\", \"required\": [\"fit_intercept\", \"normalize\", \"copy_X\", \"n_jobs\"], \"relevantToOptimizer\": [\"fit_intercept\", \"normalize\",", "{ \"type\": \"array\", \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}}, },", "\"description\": \"This parameter is ignored when ``fit_intercept`` is set to", "computation\", }, }, }, { \"XXX TODO XXX\": \"Parameter: n_jobs", "LinearRegressionImpl: def __init__(self, **hyperparams): self._hyperparams = hyperparams self._wrapped_model = Op(**self._hyperparams)", "XXX\": \"item type\"}, \"XXX TODO XXX\": \"array_like or sparse matrix,", "} _combined_schemas = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Combined schema for", "\"object\", \"required\": [\"fit_intercept\", \"normalize\", \"copy_X\", \"n_jobs\"], 
\"relevantToOptimizer\": [\"fit_intercept\", \"normalize\", \"copy_X\"],", "{ \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"inherited docstring for LinearRegression Ordinary least", "X, y=None): if y is not None: self._wrapped_model.fit(X, y) else:", "\"description\": \"Predict using the linear model\", \"type\": \"object\", \"required\": [\"X\"],", "\"type\": \"array\", \"items\": {\"type\": \"number\"}, \"description\": \"Individual weights for each", "True, \"description\": \"whether to calculate the intercept for this model\",", "\"post\": []}, \"properties\": { \"hyperparams\": _hyperparams_schema, \"input_fit\": _input_fit_schema, \"input_predict\": _input_predict_schema,", "set to False\", }, \"copy_X\": { \"type\": \"boolean\", \"default\": True,", "nan from sklearn.linear_model import LinearRegression as Op from lale.docstrings import", "return self def predict(self, X): return self._wrapped_model.predict(X) _hyperparams_schema = {", "1, \"description\": \"The number of jobs to use for the", "= { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"inherited docstring for LinearRegression Ordinary", "the linear model\", \"type\": \"object\", \"required\": [\"X\"], \"properties\": { \"X\":", "import make_operator class LinearRegressionImpl: def __init__(self, **hyperparams): self._hyperparams = hyperparams", "\"array\", \"items\": {\"type\": \"number\"}}, }, ], \"description\": \"Training data\", },", "[\"fit_intercept\", \"normalize\", \"copy_X\", \"n_jobs\"], \"relevantToOptimizer\": [\"fit_intercept\", \"normalize\", \"copy_X\"], \"additionalProperties\": False,", "{\"type\": \"array\", \"items\": {\"type\": \"number\"}}, \"description\": \"Target values\", }, \"sample_weight\":", "values.\", \"type\": \"array\", \"items\": {\"type\": \"number\"}, } _combined_schemas = {", "else: self._wrapped_model.fit(X) return self def predict(self, X): return self._wrapped_model.predict(X) _hyperparams_schema", "\"object\", \"tags\": {\"pre\": [], \"op\": [\"estimator\"], \"post\": []}, \"properties\": {", "n_targets > 1 and sufficient large problems\" }, ], }", "\"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}}, }, ], \"description\": \"Samples.\",", "for LinearRegression Ordinary least squares Linear Regression.\", \"allOf\": [ {", "}, ], \"description\": \"Samples.\", } }, } _output_predict_schema = {", "def predict(self, X): return self._wrapped_model.predict(X) _hyperparams_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\",", "Ordinary least squares Linear Regression.\", \"allOf\": [ { \"type\": \"object\",", "\"description\": \"Fit linear model.\", \"type\": \"object\", \"required\": [\"X\", \"y\"], \"properties\":", "from lale.docstrings import set_docstrings from lale.operators import make_operator class LinearRegressionImpl:", "Regression.\", \"allOf\": [ { \"type\": \"object\", \"required\": [\"fit_intercept\", \"normalize\", \"copy_X\",", "\"number\"}}, }, ], \"description\": \"Training data\", }, \"y\": { \"type\":", "\"http://json-schema.org/draft-04/schema#\", \"description\": \"Fit linear model.\", \"type\": \"object\", \"required\": [\"X\", \"y\"],", "be copied; else, it may be overwritten.\", }, \"n_jobs\": {", "False, \"properties\": { \"fit_intercept\": { \"type\": \"boolean\", \"default\": True, \"description\":", "}, { \"type\": \"array\", \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},", 
"\"https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression\", \"import_from\": \"sklearn.linear_model\", \"type\": \"object\", \"tags\": {\"pre\": [], \"op\": [\"estimator\"],", "\"normalize\", \"copy_X\", \"n_jobs\"], \"relevantToOptimizer\": [\"fit_intercept\", \"normalize\", \"copy_X\"], \"additionalProperties\": False, \"properties\":", "\"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Fit linear model.\", \"type\": \"object\", \"required\": [\"X\",", "when ``fit_intercept`` is set to False\", }, \"copy_X\": { \"type\":", "}, \"y\": { \"type\": \"array\", \"items\": {\"type\": \"array\", \"items\": {\"type\":", "{ \"type\": \"boolean\", \"default\": False, \"description\": \"This parameter is ignored", "\"anyOf\": [ { \"type\": \"array\", \"items\": {\"laleType\": \"Any\", \"XXX TODO", "\"copy_X\": { \"type\": \"boolean\", \"default\": True, \"description\": \"If True, X", "self._wrapped_model.predict(X) _hyperparams_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"inherited docstring for", "\"description\": \"If True, X will be copied; else, it may", "least squares Linear Regression.\", \"allOf\": [ { \"type\": \"object\", \"required\":", "{ \"XXX TODO XXX\": \"Parameter: n_jobs > only provide speedup", "\"output_predict\": _output_predict_schema, }, } set_docstrings(LinearRegressionImpl, _combined_schemas) LinearRegression = make_operator(LinearRegressionImpl, _combined_schemas)", "number of jobs to use for the computation\", }, },", "return self._wrapped_model.predict(X) _hyperparams_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"inherited docstring", "= { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Fit linear model.\", \"type\": \"object\",", "type\"}, \"XXX TODO XXX\": \"array_like or sparse matrix, shape (n_samples,", "\"integer\"}, {\"enum\": [None]}], \"default\": 1, \"description\": \"The number of jobs", "[None]}], \"default\": 1, \"description\": \"The number of jobs to use", "\"description\": \"whether to calculate the intercept for this model\", },", "False, \"description\": \"This parameter is ignored when ``fit_intercept`` is set", "model\", \"type\": \"object\", \"required\": [\"X\"], \"properties\": { \"X\": { \"anyOf\":", "linear model.\", \"type\": \"object\", \"required\": [\"X\", \"y\"], \"properties\": { \"X\":", "y=None): if y is not None: self._wrapped_model.fit(X, y) else: self._wrapped_model.fit(X)", "\"number\"}, } _combined_schemas = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Combined schema", "jobs to use for the computation\", }, }, }, {", "\"XXX TODO XXX\": \"Parameter: n_jobs > only provide speedup for", "\"input_fit\": _input_fit_schema, \"input_predict\": _input_predict_schema, \"output_predict\": _output_predict_schema, }, } set_docstrings(LinearRegressionImpl, _combined_schemas)", "= { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Returns predicted values.\", \"type\": \"array\",", "_output_predict_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Returns predicted values.\", \"type\":", "\"array_like or sparse matrix, shape (n_samples, n_features)\", }, { \"type\":", "from numpy import inf, nan from sklearn.linear_model import LinearRegression as", "numpy import inf, nan from sklearn.linear_model import LinearRegression as Op", "}, 
\"n_jobs\": { \"anyOf\": [{\"type\": \"integer\"}, {\"enum\": [None]}], \"default\": 1,", "def fit(self, X, y=None): if y is not None: self._wrapped_model.fit(X,", "}, }, }, { \"XXX TODO XXX\": \"Parameter: n_jobs >", "\"copy_X\", \"n_jobs\"], \"relevantToOptimizer\": [\"fit_intercept\", \"normalize\", \"copy_X\"], \"additionalProperties\": False, \"properties\": {", "the intercept for this model\", }, \"normalize\": { \"type\": \"boolean\",", "_input_predict_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Predict using the linear", "\"XXX TODO XXX\": \"array_like or sparse matrix, shape (n_samples, n_features)\",", "squares Linear Regression.\", \"allOf\": [ { \"type\": \"object\", \"required\": [\"fit_intercept\",", "provide speedup for n_targets > 1 and sufficient large problems\"", "\"array\", \"items\": {\"type\": \"number\"}}, \"description\": \"Target values\", }, \"sample_weight\": {", "lale.operators import make_operator class LinearRegressionImpl: def __init__(self, **hyperparams): self._hyperparams =", "\"items\": {\"type\": \"number\"}}, \"description\": \"Target values\", }, \"sample_weight\": { \"type\":", "{\"type\": \"number\"}, } _combined_schemas = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Combined", "make_operator class LinearRegressionImpl: def __init__(self, **hyperparams): self._hyperparams = hyperparams self._wrapped_model", "\"Any\", \"XXX TODO XXX\": \"item type\"}, \"XXX TODO XXX\": \"array-like", "\"If True, X will be copied; else, it may be", "XXX\": \"item type\"}, \"XXX TODO XXX\": \"array-like or sparse matrix,", "\"input_predict\": _input_predict_schema, \"output_predict\": _output_predict_schema, }, } set_docstrings(LinearRegressionImpl, _combined_schemas) LinearRegression =", "\"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Returns predicted values.\", \"type\": \"array\", \"items\": {\"type\":", "\"XXX TODO XXX\": \"array-like or sparse matrix, shape (n_samples, n_features)\",", "\"item type\"}, \"XXX TODO XXX\": \"array-like or sparse matrix, shape", "\"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Predict using the linear model\", \"type\": \"object\",", "\"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Combined schema for expected data and hyperparameters.\",", "XXX\": \"Parameter: n_jobs > only provide speedup for n_targets >", "{ \"anyOf\": [ { \"type\": \"array\", \"items\": {\"laleType\": \"Any\", \"XXX", "_input_predict_schema, \"output_predict\": _output_predict_schema, }, } set_docstrings(LinearRegressionImpl, _combined_schemas) LinearRegression = make_operator(LinearRegressionImpl,", "{\"enum\": [None]}], \"default\": 1, \"description\": \"The number of jobs to", "model\", }, \"normalize\": { \"type\": \"boolean\", \"default\": False, \"description\": \"This", "\"object\", \"required\": [\"X\"], \"properties\": { \"X\": { \"anyOf\": [ {", "\"number\"}}, }, ], \"description\": \"Samples.\", } }, } _output_predict_schema =", "calculate the intercept for this model\", }, \"normalize\": { \"type\":", "_combined_schemas = { \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Combined schema for expected", "sufficient large problems\" }, ], } _input_fit_schema = { \"$schema\":", "**hyperparams): self._hyperparams = hyperparams self._wrapped_model = Op(**self._hyperparams) def fit(self, X,", "may be overwritten.\", }, \"n_jobs\": { \"anyOf\": [{\"type\": 
\"integer\"}, {\"enum\":", "{\"pre\": [], \"op\": [\"estimator\"], \"post\": []}, \"properties\": { \"hyperparams\": _hyperparams_schema,", "{ \"type\": \"object\", \"required\": [\"fit_intercept\", \"normalize\", \"copy_X\", \"n_jobs\"], \"relevantToOptimizer\": [\"fit_intercept\",", "from lale.operators import make_operator class LinearRegressionImpl: def __init__(self, **hyperparams): self._hyperparams", "y) else: self._wrapped_model.fit(X) return self def predict(self, X): return self._wrapped_model.predict(X)", "docstring for LinearRegression Ordinary least squares Linear Regression.\", \"allOf\": [", "\"number\"}}, \"description\": \"Target values\", }, \"sample_weight\": { \"type\": \"array\", \"items\":", "is not None: self._wrapped_model.fit(X, y) else: self._wrapped_model.fit(X) return self def", "[\"estimator\"], \"post\": []}, \"properties\": { \"hyperparams\": _hyperparams_schema, \"input_fit\": _input_fit_schema, \"input_predict\":", "\"allOf\": [ { \"type\": \"object\", \"required\": [\"fit_intercept\", \"normalize\", \"copy_X\", \"n_jobs\"],", "{ \"$schema\": \"http://json-schema.org/draft-04/schema#\", \"description\": \"Returns predicted values.\", \"type\": \"array\", \"items\":", "\"n_jobs\": { \"anyOf\": [{\"type\": \"integer\"}, {\"enum\": [None]}], \"default\": 1, \"description\":", "values\", }, \"sample_weight\": { \"type\": \"array\", \"items\": {\"type\": \"number\"}, \"description\":", "large problems\" }, ], } _input_fit_schema = { \"$schema\": \"http://json-schema.org/draft-04/schema#\",", "= hyperparams self._wrapped_model = Op(**self._hyperparams) def fit(self, X, y=None): if", "\"number\"}, \"description\": \"Individual weights for each sample \", }, }," ]
[ "cols and number of blocks are incompatible\" # calculate size", "name='Conv52_{}'.format(rr))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_x52_{}'.format(rr))(x52) x", "decreased throughout the model numBlocks: number of processing blocks. The", "the number the deeper the model output_chan: number of output", "connections to match up properly \"\"\" use_bn = True #", "assert rows % 2**numBlocks == 0, \"Input rows and number", "padding='same', name='DeConv3_{}'.format(dd))(x) if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3)", "for this many blocks. Use fewer blocks or larger input\"", "= ELU(name='elu_Dclean1_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean2_{}'.format(dd))(x) if", "padding='same', name='Conv3_{}'.format(rr))(x) if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_x3_{}'.format(rr))(x3)", "task Args: input shape: a list or tuple of [rows,cols,channels]", "(3, 3), padding='same', name='DeConv51_{}'.format(dd))(x) if use_bn: x51 = BatchNormalization()(x51) x51", "= [l.output for l in model.layers if 'skip' in l.name]", "that are powers of 2 is recommended. Otherwise, the rows/cols", "layers # get input layer # model must be compiled", "keras.applications.inception_v3 import InceptionV3 from keras.initializers import RandomNormal from keras.layers import", "after using this function lay_input = model.input # get skip", "2D Block Model def BlockModel2D(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates a Block", "= Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv51_{}'.format(dd))(x) if use_bn: x51 =", "= model.input # get skip connection layer outputs skip_list =", "number of filters in the first and last layers This", "rows/cols that are powers of 2 is recommended. Otherwise, the", "if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_all_{}'.format(rr))(x) x =", "list or tuple of [rows,cols,channels] of input images filt_num: the", "on classification task Args: input shape: a list or tuple", "% 2**numBlocks == 0, \"Input cols and number of blocks", "Block Model def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates a Block model", "ELU(name='elu_skip_{}'.format(rr))(x) # average pooling x = GlobalAveragePooling2D()(x) # classifier lay_out", "(BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda,", "the deeper the model output_chan: number of output channels. Set", "x = concatenate([skip_list[dd-1], x], name='skip_connect_{}'.format(dd)) x1 = Conv2D(filt_num*dd, (1, 1),", "= ELU(name='elu_Dall_{}'.format(dd))(x) x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x) x = Conv2D(filt_num*dd,", "(1, 1), activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) def Inception_model(input_shape=(299, 299,", "this many blocks. 
Use fewer blocks or larger input\" #", "linear activation Returns: An unintialized Keras model Example useage: SegModel", "padding='same', name='ConvClean_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_clean_{}'.format(rr))(x)", "LeakyReLU from keras.models import Model # Parameterized 2D Block Model", "Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv52_{}'.format(rr))(x51) if use_bn: x52 = BatchNormalization()(x52)", "using this function lay_input = model.input # get skip connection", "2^numBlocks for skip connections to match up properly \"\"\" use_bn", "(1, 1), padding='valid', name='ConvAll_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x", "Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D,", "# x = Conv2D(16, (3, 3), activation='relu')(incep_output) # x =", "== 0, \"Input cols and number of blocks are incompatible\"", "input layer lay_input = Input(shape=input_shape, name='input_layer') # contracting blocks x", "are incompatible\" assert cols % 2**numBlocks == 0, \"Input cols", "from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten,", "Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv51_{}'.format(dd))(x) if use_bn: x51 = BatchNormalization()(x51)", "this function lay_input = model.input # get skip connection layer", "with linear activation Returns: An unintialized Keras model Example useage:", "Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D,", "299, 3)): incep_model = InceptionV3( include_top=False, weights=None, input_shape=input_shape, pooling='avg') input_layer", "name='merge_{}'.format(rr)) x = Conv2D(filt_num*rr, (1, 1), padding='valid', name='ConvAll_{}'.format(rr))(x) if use_bn:", "use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51) x52 = Conv2D(filt_num*dd,", "linearly increased and decreased throughout the model numBlocks: number of", "incep_model.output # x = Conv2D(16, (3, 3), activation='relu')(incep_output) # x", "Model(lay_input, lay_out) def Inception_model(input_shape=(299, 299, 3)): incep_model = InceptionV3( include_top=False,", "in expnums: if dd < len(skip_list): x = concatenate([skip_list[dd-1], x],", "for dd in expnums: if dd < len(skip_list): x =", "lay_input skip_list = [] for rr in range(1, numBlocks+1): x1", "= lay_input skip_list = [] for rr in range(1, numBlocks+1):", "3), padding='same', name='Conv51_{}'.format(rr))(x) if use_bn: x51 = BatchNormalization()(x51) x51 =", "padding='same', name='Conv51_{}'.format(rr))(x) if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_x51_{}'.format(rr))(x51)", "x = ELU(name='elu_skip_{}'.format(rr))(x) # average pooling x = GlobalAveragePooling2D()(x) #", "return Model(lay_input, lay_out) # Parameterized 2D Block Model def BlockModel_Classifier(input_shape,", "BatchNormalization()(x1) x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1) x3 = Conv2D(filt_num*dd, (3, 3), padding='same',", "ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (4, 4), padding='valid', strides=(2,", "ELU, LeakyReLU from keras.models import Model # Parameterized 2D Block", "the number of filters in the first and last layers", "Returns: An unintialized Keras model Example useage: SegModel = BlockModel2D([256,256,1],filt_num=8)", "output_chan: number of output channels. 
Set if doing multi-class segmentation", "use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dall_{}'.format(dd))(x) x = UpSampling2D(size=(2,", "Parameterized 2D Block Model def BlockModel2D(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates a", "of input images filt_num: the number of filters in the", "= BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that are powers of 2", "input_shape[0:2] assert rows % 2**numBlocks == 0, \"Input rows and", "(3, 3), padding='same', name='Conv52_{}'.format(rr))(x51) if use_bn: x52 = BatchNormalization()(x52) x52", "ZeroPadding3D, add, concatenate) from keras.layers.advanced_activations import ELU, LeakyReLU from keras.models", "Block model for pretraining on classification task Args: input shape:", "ELU(name='elu_x1_{}'.format(rr))(x1) x3 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv3_{}'.format(rr))(x) if use_bn:", "Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate) from", "numBlocks+1)))/2**numBlocks assert minsize > 4, \"Too small of input for", "range(1, numBlocks+1): x1 = Conv2D(filt_num*rr, (1, 1), padding='same', name='Conv1_{}'.format(rr))(x) if", "assert cols % 2**numBlocks == 0, \"Input cols and number", "= BatchNormalization()(x) x = ELU(name='elu_downsample_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (3, 3),", "= incep_model.output # x = Conv2D(16, (3, 3), activation='relu')(incep_output) #", "BatchNormalization()(x52) x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52) x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd))", "# and complimentary, unfrozen decoder layers # get input layer", "Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add,", "doing multi-class segmentation regression: Whether to have a continuous output", "= InceptionV3( include_top=False, weights=None, input_shape=input_shape, pooling='avg') input_layer = incep_model.input incep_output", "name='Conv3_{}'.format(rr))(x) if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_x3_{}'.format(rr))(x3) x51", "ELU(name='elu_x52_{}'.format(rr))(x52) x = concatenate([x1, x3, x52], name='merge_{}'.format(rr)) x = Conv2D(filt_num*rr,", "encoder layers for layer in model.layers: layer.trainable = False use_bn", "Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv52_{}'.format(dd))(x51) if use_bn: x52 = BatchNormalization()(x52)", "= GlobalAveragePooling2D()(x) # classifier lay_out = Dense(1, activation='sigmoid', name='output_layer')(x) return", "x = ELU(name='elu_Dclean1_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean2_{}'.format(dd))(x)", "padding='same', name='Conv1_{}'.format(rr))(x) if use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_x1_{}'.format(rr))(x1)", "blocks. 
The larger the number the deeper the model output_chan:", "if doing multi-class segmentation regression: Whether to have a continuous", "in model.layers: layer.trainable = False use_bn = True # make", "input images filt_num: the number of filters in the first", "Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) def Inception_model(input_shape=(299,", "2**numBlocks == 0, \"Input cols and number of blocks are", "use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52) x = concatenate([x1,", "use_bn: x = BatchNormalization()(x) x = ELU(name='elu_clean_{}'.format(rr))(x) skip_list.append(x) # expanding", "use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_x52_{}'.format(rr))(x52) x = concatenate([x1,", "2), name='UpSample_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean1_{}'.format(dd))(x) if", "Conv2D(filt_num*rr, (3, 3), padding='same', name='ConvClean_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x)", "1), padding='same', name='Conv1_{}'.format(rr))(x) if use_bn: x1 = BatchNormalization()(x1) x1 =", "(3, 3), padding='same', name='DeConvClean1_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x", "BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates a Block model for pretraining on", "numBlocks = len(skip_list) filt_num = int(skip_list[0].shape[-1]) x = model.layers[-3].output #", "list(range(1, numBlocks+1)) expnums.reverse() for dd in expnums: if dd <", "padding='same', name='DeConv52_{}'.format(dd))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52)", "name='output_layer')(x) return Model(lay_input, lay_out) def Inception_model(input_shape=(299, 299, 3)): incep_model =", "x1 = BatchNormalization()(x1) x1 = ELU(name='elu_x1_{}'.format(rr))(x1) x3 = Conv2D(filt_num*rr, (3,", "be compiled again after using this function lay_input = model.input", "minsize > 4, \"Too small of input for this many", "rows and number of blocks are incompatible\" assert cols %", "expnums.reverse() for dd in expnums: if dd < len(skip_list): x", "output channels. Set if doing multi-class segmentation regression: Whether to", "int(skip_list[0].shape[-1]) x = model.layers[-3].output # freeze encoder layers for layer", "4, \"Too small of input for this many blocks. 
Use", "== 0, \"Input rows and number of blocks are incompatible\"", "def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates a Block model for pretraining", "useage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that are powers", "a Block CED model for segmentation problems Args: input shape:", "model.input # get skip connection layer outputs skip_list = [l.output", "1), activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) def Inception_model(input_shape=(299, 299, 3)):", "input_shape=input_shape, pooling='avg') input_layer = incep_model.input incep_output = incep_model.output # x", "= Dense(1, activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) def ConvertEncoderToCED(model): #", "Set if doing multi-class segmentation regression: Whether to have a", "the first and last layers This number is multipled linearly", "Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D,", "x = lay_input skip_list = [] for rr in range(1,", "outputs skip_list = [l.output for l in model.layers if 'skip'", "rows, cols = input_shape[0:2] assert rows % 2**numBlocks == 0,", "= ELU(name='elu_x51_{}'.format(rr))(x51) x52 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv52_{}'.format(rr))(x51) if", "Block Model def BlockModel2D(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates a Block CED", "strides=(2, 2), name='DownSample_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x =", "x3, x52], name='Dmerge_{}'.format(dd)) x = Conv2D(filt_num*dd, (1, 1), padding='valid', name='DeConvAll_{}'.format(dd))(x)", "in model.layers if 'skip' in l.name] numBlocks = len(skip_list) filt_num", "RandomNormal from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense,", "= ELU(name='elu_Dx52_{}'.format(dd))(x52) x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd)) x =", "function lay_input = model.input # get skip connection layer outputs", "= BatchNormalization()(x52) x52 = ELU(name='elu_x52_{}'.format(rr))(x52) x = concatenate([x1, x3, x52],", "x1 = Conv2D(filt_num*dd, (1, 1), padding='same', name='DeConv1_{}'.format(dd))(x) if use_bn: x1", "get input layer # model must be compiled again after", "rr in range(1, numBlocks+1): x1 = Conv2D(filt_num*rr, (1, 1), padding='same',", "layers # and complimentary, unfrozen decoder layers # get input", "name='Conv51_{}'.format(rr))(x) if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_x51_{}'.format(rr))(x51) x52", "of blocks are incompatible\" assert cols % 2**numBlocks == 0,", "must be divisible by 2^numBlocks for skip connections to match", "= ELU(name='elu_Dx1_{}'.format(dd))(x1) x3 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv3_{}'.format(dd))(x) if", "Parameterized 2D Block Model def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates a", "are powers of 2 is recommended. 
Otherwise, the rows/cols must", "expanding blocks expnums = list(range(1, numBlocks+1)) expnums.reverse() for dd in", "layer outputs skip_list = [l.output for l in model.layers if", "= ELU(name='elu_x3_{}'.format(rr))(x3) x51 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv51_{}'.format(rr))(x) if", "lay_input = Input(shape=input_shape, name='input_layer') # contracting blocks x = lay_input", "= True # make expanding blocks expnums = list(range(1, numBlocks+1))", "filters in the first and last layers This number is", "# Returns a model with frozen encoder layers # and", "x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3),", "segmentation problems Args: input shape: a list or tuple of", "blocks or larger input\" # input layer lay_input = Input(shape=input_shape,", "= BatchNormalization()(x52) x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52) x = concatenate([x1, x3, x52],", "skip_list = [l.output for l in model.layers if 'skip' in", "lay_out) # Parameterized 2D Block Model def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3):", "if 'skip' in l.name] numBlocks = len(skip_list) filt_num = int(skip_list[0].shape[-1])", "Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate) from keras.layers.advanced_activations import ELU,", "x3, x52], name='merge_{}'.format(rr)) x = Conv2D(filt_num*rr, (1, 1), padding='valid', name='ConvAll_{}'.format(rr))(x)", "pretraining on classification task Args: input shape: a list or", "(1, 1), activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) # Parameterized 2D", "return Model(lay_input, lay_out) def ConvertEncoderToCED(model): # Returns a model with", "Example useage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that are", "(3, 3), padding='same', name='DeConv52_{}'.format(dd))(x51) if use_bn: x52 = BatchNormalization()(x52) x52", "import RandomNormal from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D,", "= Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean1_{}'.format(dd))(x) if use_bn: x =", "model.layers[-3].output # freeze encoder layers for layer in model.layers: layer.trainable", "keras.models import Model # Parameterized 2D Block Model def BlockModel2D(input_shape,", "freeze encoder layers for layer in model.layers: layer.trainable = False", "Model # Parameterized 2D Block Model def BlockModel2D(input_shape, filt_num=16, numBlocks=3):", "= Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) #", "a Block model for pretraining on classification task Args: input", "unfrozen decoder layers # get input layer # model must", "activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) # Parameterized 2D Block Model", "of blocks are incompatible\" # calculate size reduction startsize =", "= np.max(input_shape[0:2]) minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks assert minsize > 4,", "= BatchNormalization()(x51) x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51) x52 = Conv2D(filt_num*dd, (3, 3),", "[l.output for l in model.layers if 'skip' in l.name] numBlocks", "(3, 3), padding='same', name='DeConvClean2_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x", "0, \"Input cols and number of blocks are incompatible\" #", "blocks. Use fewer blocks or larger input\" # input layer", "deeper the model output_chan: number of output channels. 
Set if", "This number is multipled linearly increased and decreased throughout the", "expnums = list(range(1, numBlocks+1)) expnums.reverse() for dd in expnums: if", "processing blocks. The larger the number the deeper the model", "filt_num=16, numBlocks=3): \"\"\"Creates a Block model for pretraining on classification", "ELU(name='elu_downsample_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (3, 3), padding='same', name='ConvClean_{}'.format(rr))(x) if use_bn:", "rows/cols must be divisible by 2^numBlocks for skip connections to", "concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd)) x = Conv2D(filt_num*dd, (1, 1), padding='valid',", "minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks assert minsize > 4, \"Too small", "name='input_layer') # contracting blocks x = lay_input skip_list = []", "(1, 1), padding='valid', name='DeConvAll_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x", "ConvertEncoderToCED(model): # Returns a model with frozen encoder layers #", "import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input,", "and number of blocks are incompatible\" # calculate size reduction", "concatenate) from keras.layers.advanced_activations import ELU, LeakyReLU from keras.models import Model", "ELU(name='elu_x3_{}'.format(rr))(x3) x51 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv51_{}'.format(rr))(x) if use_bn:", "and last layers This number is multipled linearly increased and", "BatchNormalization()(x3) x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3) x51 = Conv2D(filt_num*dd, (3, 3), padding='same',", "of [rows,cols,channels] of input images filt_num: the number of filters", "incompatible\" assert cols % 2**numBlocks == 0, \"Input cols and", "= BatchNormalization()(x3) x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3) x51 = Conv2D(filt_num*dd, (3, 3),", "l in model.layers if 'skip' in l.name] numBlocks = len(skip_list)", "ELU(name='elu_Dx1_{}'.format(dd))(x1) x3 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv3_{}'.format(dd))(x) if use_bn:", "# calculate size reduction startsize = np.max(input_shape[0:2]) minsize = (startsize-np.sum(2**np.arange(1,", "\"Input rows and number of blocks are incompatible\" assert cols", "if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51) x52 =", "(3, 3), activation='relu')(incep_output) # x = Flatten()(x) x = Dense(1,", "\"\"\" use_bn = True # check for input shape compatibility", "padding='same', name='ConvClean_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_skip_{}'.format(rr))(x)", "len(skip_list): x = concatenate([skip_list[dd-1], x], name='skip_connect_{}'.format(dd)) x1 = Conv2D(filt_num*dd, (1,", "x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean2_{}'.format(dd))(x) if use_bn: x", "output with linear activation Returns: An unintialized Keras model Example", "use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_x1_{}'.format(rr))(x1) x3 = Conv2D(filt_num*rr,", "BatchNormalization()(x) x = ELU(name='elu_skip_{}'.format(rr))(x) # average pooling x = GlobalAveragePooling2D()(x)", "name='ConvAll_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_all_{}'.format(rr))(x) x", "BatchNormalization()(x52) x52 = ELU(name='elu_x52_{}'.format(rr))(x52) x = concatenate([x1, x3, x52], name='merge_{}'.format(rr))", "(1, 1), padding='same', name='Conv1_{}'.format(rr))(x) if use_bn: x1 = BatchNormalization()(x1) x1", "Conv2D(filt_num*rr, (1, 1), 
padding='valid', name='ConvAll_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x)", "Model def BlockModel2D(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates a Block CED model", "small of input for this many blocks. Use fewer blocks", "name='DownSample_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_downsample_{}'.format(rr))(x) x", "regression: Whether to have a continuous output with linear activation", "ELU(name='elu_x51_{}'.format(rr))(x51) x52 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv52_{}'.format(rr))(x51) if use_bn:", "lay_input = model.input # get skip connection layer outputs skip_list", "concatenate([x1, x3, x52], name='merge_{}'.format(rr)) x = Conv2D(filt_num*rr, (1, 1), padding='valid',", "= ELU(name='elu_x52_{}'.format(rr))(x52) x = concatenate([x1, x3, x52], name='merge_{}'.format(rr)) x =", "from keras.models import Model # Parameterized 2D Block Model def", "x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3) x51 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv51_{}'.format(dd))(x)", "x = ELU(name='elu_all_{}'.format(rr))(x) x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x) x =", "Conv2D(filt_num*rr, (1, 1), padding='same', name='Conv1_{}'.format(rr))(x) if use_bn: x1 = BatchNormalization()(x1)", "activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) def ConvertEncoderToCED(model): # Returns a", "= Input(shape=input_shape, name='input_layer') # contracting blocks x = lay_input skip_list", "a list or tuple of [rows,cols,channels] of input images filt_num:", "x3 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv3_{}'.format(dd))(x) if use_bn: x3", "x3 = BatchNormalization()(x3) x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3) x51 = Conv2D(filt_num*dd, (3,", "filt_num = int(skip_list[0].shape[-1]) x = model.layers[-3].output # freeze encoder layers", "# classifier lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x) return", "BlockModel2D(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates a Block CED model for segmentation", "if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_x51_{}'.format(rr))(x51) x52 =", "x52 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv52_{}'.format(dd))(x51) if use_bn: x52", "Otherwise, the rows/cols must be divisible by 2^numBlocks for skip", "0, \"Input rows and number of blocks are incompatible\" assert", "by 2^numBlocks for skip connections to match up properly \"\"\"", "layer # model must be compiled again after using this", "1), activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) # Parameterized 2D Block", "Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D,", "activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) def Inception_model(input_shape=(299, 299, 3)): incep_model", "x = ELU(name='elu_Dclean2_{}'.format(dd))(x) # classifier lay_out = Conv2D(1, (1, 1),", "model must be compiled again after using this function lay_input", "padding='same', name='DeConvClean1_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean1_{}'.format(dd))(x)", "use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1) x3 = Conv2D(filt_num*dd,", "larger the number the deeper the model output_chan: number of", "ELU(name='elu_Dx3_{}'.format(dd))(x3) x51 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv51_{}'.format(dd))(x) if use_bn:", "3), 
padding='same', name='Conv3_{}'.format(rr))(x) if use_bn: x3 = BatchNormalization()(x3) x3 =", "pooling x = GlobalAveragePooling2D()(x) # classifier lay_out = Dense(1, activation='sigmoid',", "Input(shape=input_shape, name='input_layer') # contracting blocks x = lay_input skip_list =", "filt_num: the number of filters in the first and last", "to match up properly \"\"\" use_bn = True # check", "BatchNormalization()(x) x = ELU(name='elu_Dclean2_{}'.format(dd))(x) # classifier lay_out = Conv2D(1, (1,", "(4, 4), padding='valid', strides=(2, 2), name='DownSample_{}'.format(rr))(x) if use_bn: x =", "cols % 2**numBlocks == 0, \"Input cols and number of", "= Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean2_{}'.format(dd))(x) if use_bn: x =", "x = Flatten()(x) x = Dense(1, activation='sigmoid')(incep_output) return Model(inputs=input_layer, outputs=x)", "problems Args: input shape: a list or tuple of [rows,cols,channels]", "incompatible\" # calculate size reduction startsize = np.max(input_shape[0:2]) minsize =", "BatchNormalization()(x51) x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51) x52 = Conv2D(filt_num*dd, (3, 3), padding='same',", "larger input\" # input layer lay_input = Input(shape=input_shape, name='input_layer') #", "in l.name] numBlocks = len(skip_list) filt_num = int(skip_list[0].shape[-1]) x =", "layer.trainable = False use_bn = True # make expanding blocks", "= UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same',", "# average pooling x = GlobalAveragePooling2D()(x) # classifier lay_out =", "x = model.layers[-3].output # freeze encoder layers for layer in", "GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate)", "Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean2_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x)", "= BatchNormalization()(x) x = ELU(name='elu_all_{}'.format(rr))(x) x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x)", "# freeze encoder layers for layer in model.layers: layer.trainable =", "x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd)) x = Conv2D(filt_num*dd, (1,", "classification task Args: input shape: a list or tuple of", "model.layers: layer.trainable = False use_bn = True # make expanding", "use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean1_{}'.format(dd))(x) x = Conv2D(filt_num*dd,", "True # check for input shape compatibility rows, cols =", "3), padding='same', name='Conv52_{}'.format(rr))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 =", "x = Conv2D(filt_num*rr, (3, 3), padding='same', name='ConvClean_{}'.format(rr))(x) if use_bn: x", "= BatchNormalization()(x) x = ELU(name='elu_Dclean2_{}'.format(dd))(x) # classifier lay_out = Conv2D(1,", "% 2**numBlocks == 0, \"Input rows and number of blocks", "= ELU(name='elu_clean_{}'.format(rr))(x) skip_list.append(x) # expanding blocks expnums = list(range(1, numBlocks+1))", "for l in model.layers if 'skip' in l.name] numBlocks =", "use_bn: x = BatchNormalization()(x) x = ELU(name='elu_downsample_{}'.format(rr))(x) x = Conv2D(filt_num*rr,", "3), padding='same', name='DeConvClean1_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x =", "if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_downsample_{}'.format(rr))(x) x =", "throughout the model numBlocks: number of processing blocks. 
The larger", "number of blocks are incompatible\" # calculate size reduction startsize", "= ELU(name='elu_skip_{}'.format(rr))(x) # average pooling x = GlobalAveragePooling2D()(x) # classifier", "2), name='DownSample_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_downsample_{}'.format(rr))(x)", "np from keras.applications.inception_v3 import InceptionV3 from keras.initializers import RandomNormal from", "x52], name='merge_{}'.format(rr)) x = Conv2D(filt_num*rr, (1, 1), padding='valid', name='ConvAll_{}'.format(rr))(x) if", "and complimentary, unfrozen decoder layers # get input layer #", "layer lay_input = Input(shape=input_shape, name='input_layer') # contracting blocks x =", "ELU(name='elu_Dall_{}'.format(dd))(x) x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3,", "BatchNormalization()(x) x = ELU(name='elu_downsample_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (3, 3), padding='same',", "blocks are incompatible\" assert cols % 2**numBlocks == 0, \"Input", "connection layer outputs skip_list = [l.output for l in model.layers", "if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_skip_{}'.format(rr))(x) # average", "numBlocks+1): x1 = Conv2D(filt_num*rr, (1, 1), padding='same', name='Conv1_{}'.format(rr))(x) if use_bn:", "blocks are incompatible\" # calculate size reduction startsize = np.max(input_shape[0:2])", "4), padding='valid', strides=(2, 2), name='DownSample_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x)", "= ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (4, 4), padding='valid',", "up properly \"\"\" use_bn = True # check for input", "1), padding='valid', name='ConvAll_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x =", "ELU(name='elu_all_{}'.format(rr))(x) x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (4,", "model with frozen encoder layers # and complimentary, unfrozen decoder", "x3 = BatchNormalization()(x3) x3 = ELU(name='elu_x3_{}'.format(rr))(x3) x51 = Conv2D(filt_num*rr, (3,", "skip_list = [] for rr in range(1, numBlocks+1): x1 =", "= ELU(name='elu_Dclean2_{}'.format(dd))(x) # classifier lay_out = Conv2D(1, (1, 1), activation='sigmoid',", "use_bn = True # check for input shape compatibility rows,", "use_bn: x = BatchNormalization()(x) x = ELU(name='elu_all_{}'.format(rr))(x) x = ZeroPadding2D(padding=(1,", "return Model(lay_input, lay_out) def Inception_model(input_shape=(299, 299, 3)): incep_model = InceptionV3(", "# get input layer # model must be compiled again", "pooling='avg') input_layer = incep_model.input incep_output = incep_model.output # x =", "= BatchNormalization()(x) x = ELU(name='elu_Dclean1_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3),", "the model output_chan: number of output channels. Set if doing", "x = BatchNormalization()(x) x = ELU(name='elu_all_{}'.format(rr))(x) x = ZeroPadding2D(padding=(1, 1),", "input shape: a list or tuple of [rows,cols,channels] of input", "x = ELU(name='elu_clean_{}'.format(rr))(x) skip_list.append(x) # expanding blocks expnums = list(range(1,", "in range(1, numBlocks+1): x1 = Conv2D(filt_num*rr, (1, 1), padding='same', name='Conv1_{}'.format(rr))(x)", "lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out)", "of 2 is recommended. 
Otherwise, the rows/cols must be divisible", "1), padding='valid', name='DeConvAll_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x =", "x = Conv2D(filt_num*rr, (4, 4), padding='valid', strides=(2, 2), name='DownSample_{}'.format(rr))(x) if", "model for pretraining on classification task Args: input shape: a", "Dense(1, activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) def ConvertEncoderToCED(model): # Returns", "= True # check for input shape compatibility rows, cols", "x1 = ELU(name='elu_x1_{}'.format(rr))(x1) x3 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv3_{}'.format(rr))(x)", "Model(lay_input, lay_out) # Parameterized 2D Block Model def BlockModel_Classifier(input_shape, filt_num=16,", "name='output_layer')(x) return Model(lay_input, lay_out) def ConvertEncoderToCED(model): # Returns a model", "first and last layers This number is multipled linearly increased", "3), padding='same', name='DeConv51_{}'.format(dd))(x) if use_bn: x51 = BatchNormalization()(x51) x51 =", "input shape compatibility rows, cols = input_shape[0:2] assert rows %", "expnums: if dd < len(skip_list): x = concatenate([skip_list[dd-1], x], name='skip_connect_{}'.format(dd))", "= Conv2D(filt_num*rr, (1, 1), padding='same', name='Conv1_{}'.format(rr))(x) if use_bn: x1 =", "padding='valid', name='ConvAll_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_all_{}'.format(rr))(x)", "BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that are powers of 2 is", "np.max(input_shape[0:2]) minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks assert minsize > 4, \"Too", "activation='relu')(incep_output) # x = Flatten()(x) x = Dense(1, activation='sigmoid')(incep_output) return", "= ELU(name='elu_Dx51_{}'.format(dd))(x51) x52 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv52_{}'.format(dd))(x51) if", "x52 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv52_{}'.format(rr))(x51) if use_bn: x52", "len(skip_list) filt_num = int(skip_list[0].shape[-1]) x = model.layers[-3].output # freeze encoder", "x = concatenate([x1, x3, x52], name='merge_{}'.format(rr)) x = Conv2D(filt_num*rr, (1,", "name='Dmerge_{}'.format(dd)) x = Conv2D(filt_num*dd, (1, 1), padding='valid', name='DeConvAll_{}'.format(dd))(x) if use_bn:", "Model def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates a Block model for", "encoder layers # and complimentary, unfrozen decoder layers # get", "input for this many blocks. Use fewer blocks or larger", "(3, 3), padding='same', name='ConvClean_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x", "(3, 3), padding='same', name='DeConv3_{}'.format(dd))(x) if use_bn: x3 = BatchNormalization()(x3) x3", "# Parameterized 2D Block Model def BlockModel2D(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates", "l.name] numBlocks = len(skip_list) filt_num = int(skip_list[0].shape[-1]) x = model.layers[-3].output", "keras.initializers import RandomNormal from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D,", "Block CED model for segmentation problems Args: input shape: a", "use_bn = True # make expanding blocks expnums = list(range(1,", "# check for input shape compatibility rows, cols = input_shape[0:2]", "Using rows/cols that are powers of 2 is recommended. 
Otherwise,", "shape: a list or tuple of [rows,cols,channels] of input images", "BatchNormalization()(x) x = ELU(name='elu_clean_{}'.format(rr))(x) skip_list.append(x) # expanding blocks expnums =", "3), padding='same', name='DeConvClean2_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x =", "\"\"\"Creates a Block model for pretraining on classification task Args:", "3)): incep_model = InceptionV3( include_top=False, weights=None, input_shape=input_shape, pooling='avg') input_layer =", "numpy as np from keras.applications.inception_v3 import InceptionV3 from keras.initializers import", "if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean1_{}'.format(dd))(x) x =", "[rows,cols,channels] of input images filt_num: the number of filters in", "BatchNormalization()(x3) x3 = ELU(name='elu_x3_{}'.format(rr))(x3) x51 = Conv2D(filt_num*rr, (3, 3), padding='same',", "BatchNormalization()(x) x = ELU(name='elu_Dclean1_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same',", "numBlocks=3): \"\"\"Creates a Block model for pretraining on classification task", "in the first and last layers This number is multipled", "match up properly \"\"\" use_bn = True # check for", "Notes: Using rows/cols that are powers of 2 is recommended.", "> 4, \"Too small of input for this many blocks.", "if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52) x =", "Model(lay_input, lay_out) def ConvertEncoderToCED(model): # Returns a model with frozen", "Conv2D(filt_num*rr, (4, 4), padding='valid', strides=(2, 2), name='DownSample_{}'.format(rr))(x) if use_bn: x", "= Conv2D(filt_num*dd, (1, 1), padding='valid', name='DeConvAll_{}'.format(dd))(x) if use_bn: x =", "= BatchNormalization()(x51) x51 = ELU(name='elu_x51_{}'.format(rr))(x51) x52 = Conv2D(filt_num*rr, (3, 3),", "frozen encoder layers # and complimentary, unfrozen decoder layers #", "number of output channels. Set if doing multi-class segmentation regression:", "of output channels. Set if doing multi-class segmentation regression: Whether", "x = ELU(name='elu_Dall_{}'.format(dd))(x) x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x) x =", "padding='same', name='DeConvClean2_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean2_{}'.format(dd))(x)", "if dd < len(skip_list): x = concatenate([skip_list[dd-1], x], name='skip_connect_{}'.format(dd)) x1", "def BlockModel2D(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates a Block CED model for", "Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean1_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x)", "ELU(name='elu_clean_{}'.format(rr))(x) skip_list.append(x) # expanding blocks expnums = list(range(1, numBlocks+1)) expnums.reverse()", "= Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) def", "x1 = Conv2D(filt_num*rr, (1, 1), padding='same', name='Conv1_{}'.format(rr))(x) if use_bn: x1", "many blocks. 
Use fewer blocks or larger input\" # input", "for rr in range(1, numBlocks+1): x1 = Conv2D(filt_num*rr, (1, 1),", "use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_x3_{}'.format(rr))(x3) x51 = Conv2D(filt_num*rr,", "segmentation regression: Whether to have a continuous output with linear", "get skip connection layer outputs skip_list = [l.output for l", "for skip connections to match up properly \"\"\" use_bn =", "layers This number is multipled linearly increased and decreased throughout", "MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate) from keras.layers.advanced_activations import", "weights=None, input_shape=input_shape, pooling='avg') input_layer = incep_model.input incep_output = incep_model.output #", "again after using this function lay_input = model.input # get", "CED model for segmentation problems Args: input shape: a list", "incep_model = InceptionV3( include_top=False, weights=None, input_shape=input_shape, pooling='avg') input_layer = incep_model.input", "activation Returns: An unintialized Keras model Example useage: SegModel =", "classifier lay_out = Dense(1, activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) def", "padding='same', name='DeConv51_{}'.format(dd))(x) if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51)", "as np from keras.applications.inception_v3 import InceptionV3 from keras.initializers import RandomNormal", "tuple of [rows,cols,channels] of input images filt_num: the number of", "contracting blocks x = lay_input skip_list = [] for rr", "(1, 1), padding='same', name='DeConv1_{}'.format(dd))(x) if use_bn: x1 = BatchNormalization()(x1) x1", "= BatchNormalization()(x1) x1 = ELU(name='elu_x1_{}'.format(rr))(x1) x3 = Conv2D(filt_num*rr, (3, 3),", "complimentary, unfrozen decoder layers # get input layer # model", "x51 = BatchNormalization()(x51) x51 = ELU(name='elu_x51_{}'.format(rr))(x51) x52 = Conv2D(filt_num*rr, (3,", "BatchNormalization()(x) x = ELU(name='elu_Dall_{}'.format(dd))(x) x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x) x", "x3 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv3_{}'.format(rr))(x) if use_bn: x3", "The larger the number the deeper the model output_chan: number", "= model.layers[-3].output # freeze encoder layers for layer in model.layers:", "2**numBlocks == 0, \"Input rows and number of blocks are", "= [] for rr in range(1, numBlocks+1): x1 = Conv2D(filt_num*rr,", "assert minsize > 4, \"Too small of input for this", "1), padding='same', name='DeConv1_{}'.format(dd))(x) if use_bn: x1 = BatchNormalization()(x1) x1 =", "= BatchNormalization()(x) x = ELU(name='elu_skip_{}'.format(rr))(x) # average pooling x =", "import Model # Parameterized 2D Block Model def BlockModel2D(input_shape, filt_num=16,", "of processing blocks. 
The larger the number the deeper the", "= BatchNormalization()(x3) x3 = ELU(name='elu_x3_{}'.format(rr))(x3) x51 = Conv2D(filt_num*rr, (3, 3),", "= Conv2D(filt_num*rr, (4, 4), padding='valid', strides=(2, 2), name='DownSample_{}'.format(rr))(x) if use_bn:", "UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate) from keras.layers.advanced_activations import ELU, LeakyReLU", "numBlocks=3): \"\"\"Creates a Block CED model for segmentation problems Args:", "incep_model.input incep_output = incep_model.output # x = Conv2D(16, (3, 3),", "[] for rr in range(1, numBlocks+1): x1 = Conv2D(filt_num*rr, (1,", "keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D,", "skip connections to match up properly \"\"\" use_bn = True", "BatchNormalization()(x1) x1 = ELU(name='elu_x1_{}'.format(rr))(x1) x3 = Conv2D(filt_num*rr, (3, 3), padding='same',", "= Conv2D(filt_num*rr, (1, 1), padding='valid', name='ConvAll_{}'.format(rr))(x) if use_bn: x =", "x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1) x3 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv3_{}'.format(dd))(x)", "decoder layers # get input layer # model must be", "if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_x3_{}'.format(rr))(x3) x51 =", "Args: input shape: a list or tuple of [rows,cols,channels] of", "blocks x = lay_input skip_list = [] for rr in", "unintialized Keras model Example useage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using", "from keras.applications.inception_v3 import InceptionV3 from keras.initializers import RandomNormal from keras.layers", "a model with frozen encoder layers # and complimentary, unfrozen", "(3, 3), padding='same', name='Conv3_{}'.format(rr))(x) if use_bn: x3 = BatchNormalization()(x3) x3", "# x = Flatten()(x) x = Dense(1, activation='sigmoid')(incep_output) return Model(inputs=input_layer,", "BatchNormalization()(x) x = ELU(name='elu_all_{}'.format(rr))(x) x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x) x", "x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean1_{}'.format(dd))(x) if use_bn: x", "x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52) x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd)) x", "2 is recommended. Otherwise, the rows/cols must be divisible by", "model output_chan: number of output channels. Set if doing multi-class", "from keras.layers.advanced_activations import ELU, LeakyReLU from keras.models import Model #", "input_layer = incep_model.input incep_output = incep_model.output # x = Conv2D(16,", "= concatenate([x1, x3, x52], name='merge_{}'.format(rr)) x = Conv2D(filt_num*rr, (1, 1),", "= Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv3_{}'.format(dd))(x) if use_bn: x3 =", "make expanding blocks expnums = list(range(1, numBlocks+1)) expnums.reverse() for dd", "Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv51_{}'.format(rr))(x) if use_bn: x51 = BatchNormalization()(x51)", "lay_out) def ConvertEncoderToCED(model): # Returns a model with frozen encoder", "name='DeConv3_{}'.format(dd))(x) if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3) x51", "if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3) x51 =", "x = Conv2D(filt_num*rr, (1, 1), padding='valid', name='ConvAll_{}'.format(rr))(x) if use_bn: x", "= Conv2D(filt_num*rr, (3, 3), padding='same', name='ConvClean_{}'.format(rr))(x) if use_bn: x =", "model numBlocks: number of processing blocks. 
The larger the number", "name='skip_connect_{}'.format(dd)) x1 = Conv2D(filt_num*dd, (1, 1), padding='same', name='DeConv1_{}'.format(dd))(x) if use_bn:", "x52 = ELU(name='elu_x52_{}'.format(rr))(x52) x = concatenate([x1, x3, x52], name='merge_{}'.format(rr)) x", "name='ConvClean_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_skip_{}'.format(rr))(x) #", "x51 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv51_{}'.format(rr))(x) if use_bn: x51", "lay_out = Dense(1, activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) def ConvertEncoderToCED(model):", "number of processing blocks. The larger the number the deeper", "Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape,", "and decreased throughout the model numBlocks: number of processing blocks.", "Conv2D(16, (3, 3), activation='relu')(incep_output) # x = Flatten()(x) x =", "= ELU(name='elu_all_{}'.format(rr))(x) x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x) x = Conv2D(filt_num*rr,", "name='Conv1_{}'.format(rr))(x) if use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_x1_{}'.format(rr))(x1) x3", "\"Too small of input for this many blocks. Use fewer", "'skip' in l.name] numBlocks = len(skip_list) filt_num = int(skip_list[0].shape[-1]) x", "be divisible by 2^numBlocks for skip connections to match up", "ZeroPadding2D, ZeroPadding3D, add, concatenate) from keras.layers.advanced_activations import ELU, LeakyReLU from", "last layers This number is multipled linearly increased and decreased", "Keras model Example useage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols", "x = BatchNormalization()(x) x = ELU(name='elu_skip_{}'.format(rr))(x) # average pooling x", "= int(skip_list[0].shape[-1]) x = model.layers[-3].output # freeze encoder layers for", "layer in model.layers: layer.trainable = False use_bn = True #", "if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_clean_{}'.format(rr))(x) skip_list.append(x) #", "x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (4, 4),", "# expanding blocks expnums = list(range(1, numBlocks+1)) expnums.reverse() for dd", "padding='valid', strides=(2, 2), name='DownSample_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x", "UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean1_{}'.format(dd))(x)", "of filters in the first and last layers This number", "x52], name='Dmerge_{}'.format(dd)) x = Conv2D(filt_num*dd, (1, 1), padding='valid', name='DeConvAll_{}'.format(dd))(x) if", "average pooling x = GlobalAveragePooling2D()(x) # classifier lay_out = Dense(1,", "startsize = np.max(input_shape[0:2]) minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks assert minsize >", "calculate size reduction startsize = np.max(input_shape[0:2]) minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks", "numBlocks: number of processing blocks. 
The larger the number the", "padding='same', name='DeConv1_{}'.format(dd))(x) if use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1)", "blocks expnums = list(range(1, numBlocks+1)) expnums.reverse() for dd in expnums:", "2D Block Model def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates a Block", "use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_x51_{}'.format(rr))(x51) x52 = Conv2D(filt_num*rr,", "= ELU(name='elu_x1_{}'.format(rr))(x1) x3 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv3_{}'.format(rr))(x) if", "is multipled linearly increased and decreased throughout the model numBlocks:", "multi-class segmentation regression: Whether to have a continuous output with", "powers of 2 is recommended. Otherwise, the rows/cols must be", "(3, 3), padding='same', name='Conv51_{}'.format(rr))(x) if use_bn: x51 = BatchNormalization()(x51) x51", "BatchNormalization()(x51) x51 = ELU(name='elu_x51_{}'.format(rr))(x51) x52 = Conv2D(filt_num*rr, (3, 3), padding='same',", "properly \"\"\" use_bn = True # check for input shape", "fewer blocks or larger input\" # input layer lay_input =", "a continuous output with linear activation Returns: An unintialized Keras", "(startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks assert minsize > 4, \"Too small of input", "increased and decreased throughout the model numBlocks: number of processing", "Conv2D(filt_num*dd, (1, 1), padding='same', name='DeConv1_{}'.format(dd))(x) if use_bn: x1 = BatchNormalization()(x1)", "False use_bn = True # make expanding blocks expnums =", "# make expanding blocks expnums = list(range(1, numBlocks+1)) expnums.reverse() for", "= BatchNormalization()(x1) x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1) x3 = Conv2D(filt_num*dd, (3, 3),", "multipled linearly increased and decreased throughout the model numBlocks: number", "the model numBlocks: number of processing blocks. The larger the", "name='DeConvClean2_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean2_{}'.format(dd))(x) #", "< len(skip_list): x = concatenate([skip_list[dd-1], x], name='skip_connect_{}'.format(dd)) x1 = Conv2D(filt_num*dd,", "x = Conv2D(filt_num*dd, (1, 1), padding='valid', name='DeConvAll_{}'.format(dd))(x) if use_bn: x", "and number of blocks are incompatible\" assert cols % 2**numBlocks", "input\" # input layer lay_input = Input(shape=input_shape, name='input_layer') # contracting", "use_bn: x = BatchNormalization()(x) x = ELU(name='elu_skip_{}'.format(rr))(x) # average pooling", "recommended. 
Otherwise, the rows/cols must be divisible by 2^numBlocks for", "dd in expnums: if dd < len(skip_list): x = concatenate([skip_list[dd-1],", "= incep_model.input incep_output = incep_model.output # x = Conv2D(16, (3,", "= BatchNormalization()(x) x = ELU(name='elu_clean_{}'.format(rr))(x) skip_list.append(x) # expanding blocks expnums", "ELU(name='elu_Dclean2_{}'.format(dd))(x) # classifier lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x)", "x = ELU(name='elu_downsample_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (3, 3), padding='same', name='ConvClean_{}'.format(rr))(x)", "Inception_model(input_shape=(299, 299, 3)): incep_model = InceptionV3( include_top=False, weights=None, input_shape=input_shape, pooling='avg')", "\"Input cols and number of blocks are incompatible\" # calculate", "name='DeConv51_{}'.format(dd))(x) if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51) x52", "dd < len(skip_list): x = concatenate([skip_list[dd-1], x], name='skip_connect_{}'.format(dd)) x1 =", "or larger input\" # input layer lay_input = Input(shape=input_shape, name='input_layer')", "x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51) x52 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv52_{}'.format(dd))(x51)", "if use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_x1_{}'.format(rr))(x1) x3 =", "Conv2D(filt_num*dd, (1, 1), padding='valid', name='DeConvAll_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x)", "x = BatchNormalization()(x) x = ELU(name='elu_Dclean1_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3,", "name='DeConvAll_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dall_{}'.format(dd))(x) x", "classifier lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x) return Model(lay_input,", "of input for this many blocks. 
Use fewer blocks or", "import InceptionV3 from keras.initializers import RandomNormal from keras.layers import (BatchNormalization,", "ELU(name='elu_Dx51_{}'.format(dd))(x51) x52 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv52_{}'.format(dd))(x51) if use_bn:", "True # make expanding blocks expnums = list(range(1, numBlocks+1)) expnums.reverse()", "name='UpSample_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean1_{}'.format(dd))(x) if use_bn:", "# input layer lay_input = Input(shape=input_shape, name='input_layer') # contracting blocks", "for pretraining on classification task Args: input shape: a list", "number of blocks are incompatible\" assert cols % 2**numBlocks ==", "Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) # Parameterized", "x = BatchNormalization()(x) x = ELU(name='elu_downsample_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (3,", "cols = input_shape[0:2] assert rows % 2**numBlocks == 0, \"Input", "model.layers if 'skip' in l.name] numBlocks = len(skip_list) filt_num =", "= Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv52_{}'.format(rr))(x51) if use_bn: x52 =", "rows % 2**numBlocks == 0, \"Input rows and number of", "= BatchNormalization()(x) x = ELU(name='elu_Dall_{}'.format(dd))(x) x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x)", "if use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1) x3 =", "def ConvertEncoderToCED(model): # Returns a model with frozen encoder layers", "have a continuous output with linear activation Returns: An unintialized", "if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean2_{}'.format(dd))(x) # classifier", "padding='valid', name='DeConvAll_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dall_{}'.format(dd))(x)", "layers for layer in model.layers: layer.trainable = False use_bn =", "incep_output = incep_model.output # x = Conv2D(16, (3, 3), activation='relu')(incep_output)", "Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv3_{}'.format(dd))(x) if use_bn: x3 = BatchNormalization()(x3)", "# contracting blocks x = lay_input skip_list = [] for", "input layer # model must be compiled again after using", "import numpy as np from keras.applications.inception_v3 import InceptionV3 from keras.initializers", "# classifier lay_out = Dense(1, activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out)", "= concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd)) x = Conv2D(filt_num*dd, (1, 1),", "lay_out) def Inception_model(input_shape=(299, 299, 3)): incep_model = InceptionV3( include_top=False, weights=None,", "channels. 
Set if doing multi-class segmentation regression: Whether to have", "Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate) from keras.layers.advanced_activations", "are incompatible\" # calculate size reduction startsize = np.max(input_shape[0:2]) minsize", "if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dall_{}'.format(dd))(x) x =", "An unintialized Keras model Example useage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes:", "= ELU(name='elu_Dx3_{}'.format(dd))(x3) x51 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv51_{}'.format(dd))(x) if", "Returns a model with frozen encoder layers # and complimentary,", "def Inception_model(input_shape=(299, 299, 3)): incep_model = InceptionV3( include_top=False, weights=None, input_shape=input_shape,", "x = BatchNormalization()(x) x = ELU(name='elu_Dclean2_{}'.format(dd))(x) # classifier lay_out =", "use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean2_{}'.format(dd))(x) # classifier lay_out", "= False use_bn = True # make expanding blocks expnums", "to have a continuous output with linear activation Returns: An", "model for segmentation problems Args: input shape: a list or", "must be compiled again after using this function lay_input =", "Use fewer blocks or larger input\" # input layer lay_input", "include_top=False, weights=None, input_shape=input_shape, pooling='avg') input_layer = incep_model.input incep_output = incep_model.output", "for input shape compatibility rows, cols = input_shape[0:2] assert rows", "= Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv51_{}'.format(rr))(x) if use_bn: x51 =", "padding='same', name='Conv52_{}'.format(rr))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_x52_{}'.format(rr))(x52)", "= Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv52_{}'.format(dd))(x51) if use_bn: x52 =", "3), padding='same', name='DeConv52_{}'.format(dd))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 =", "= Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv3_{}'.format(rr))(x) if use_bn: x3 =", "numBlocks+1)) expnums.reverse() for dd in expnums: if dd < len(skip_list):", "x52 = BatchNormalization()(x52) x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52) x = concatenate([x1, x3,", "name='output_layer')(x) return Model(lay_input, lay_out) # Parameterized 2D Block Model def", "name='PrePad_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (4, 4), padding='valid', strides=(2, 2), name='DownSample_{}'.format(rr))(x)", "3), padding='same', name='ConvClean_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x =", "if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_x52_{}'.format(rr))(x52) x =", "InceptionV3( include_top=False, weights=None, input_shape=input_shape, pooling='avg') input_layer = incep_model.input incep_output =", "# get skip connection layer outputs skip_list = [l.output for", "continuous output with linear activation Returns: An unintialized Keras model", "InceptionV3 from keras.initializers import RandomNormal from keras.layers import (BatchNormalization, Conv2D,", "keras.layers.advanced_activations import ELU, LeakyReLU from keras.models import Model # Parameterized", "check for input shape compatibility rows, cols = input_shape[0:2] assert", "or tuple of [rows,cols,channels] of input images filt_num: the number", "x51 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv51_{}'.format(dd))(x) if use_bn: x51", "ELU(name='elu_Dclean1_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', 
name='DeConvClean2_{}'.format(dd))(x) if use_bn:", "the rows/cols must be divisible by 2^numBlocks for skip connections", "1), name='PrePad_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (4, 4), padding='valid', strides=(2, 2),", "x = BatchNormalization()(x) x = ELU(name='elu_Dall_{}'.format(dd))(x) x = UpSampling2D(size=(2, 2),", "with frozen encoder layers # and complimentary, unfrozen decoder layers", "# model must be compiled again after using this function", "x3 = ELU(name='elu_x3_{}'.format(rr))(x3) x51 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv51_{}'.format(rr))(x)", "name='DeConv52_{}'.format(dd))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52) x", "add, concatenate) from keras.layers.advanced_activations import ELU, LeakyReLU from keras.models import", "images filt_num: the number of filters in the first and", "= ELU(name='elu_downsample_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (3, 3), padding='same', name='ConvClean_{}'.format(rr))(x) if", "compiled again after using this function lay_input = model.input #", "= Conv2D(filt_num*dd, (1, 1), padding='same', name='DeConv1_{}'.format(dd))(x) if use_bn: x1 =", "x1 = BatchNormalization()(x1) x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1) x3 = Conv2D(filt_num*dd, (3,", "name='DeConvClean1_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean1_{}'.format(dd))(x) x", "divisible by 2^numBlocks for skip connections to match up properly", "# Parameterized 2D Block Model def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3): \"\"\"Creates", "x = Conv2D(16, (3, 3), activation='relu')(incep_output) # x = Flatten()(x)", "x = BatchNormalization()(x) x = ELU(name='elu_clean_{}'.format(rr))(x) skip_list.append(x) # expanding blocks", "x = GlobalAveragePooling2D()(x) # classifier lay_out = Dense(1, activation='sigmoid', name='output_layer')(x)", "x], name='skip_connect_{}'.format(dd)) x1 = Conv2D(filt_num*dd, (1, 1), padding='same', name='DeConv1_{}'.format(dd))(x) if", "= Conv2D(16, (3, 3), activation='relu')(incep_output) # x = Flatten()(x) x", "x52 = BatchNormalization()(x52) x52 = ELU(name='elu_x52_{}'.format(rr))(x52) x = concatenate([x1, x3,", "is recommended. 
Otherwise, the rows/cols must be divisible by 2^numBlocks", "GlobalAveragePooling2D()(x) # classifier lay_out = Dense(1, activation='sigmoid', name='output_layer')(x) return Model(lay_input,", "= input_shape[0:2] assert rows % 2**numBlocks == 0, \"Input rows", "SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that are powers of", "reduction startsize = np.max(input_shape[0:2]) minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks assert minsize", "\"\"\"Creates a Block CED model for segmentation problems Args: input", "3), activation='relu')(incep_output) # x = Flatten()(x) x = Dense(1, activation='sigmoid')(incep_output)", "3), padding='same', name='DeConv3_{}'.format(dd))(x) if use_bn: x3 = BatchNormalization()(x3) x3 =", "filt_num=16, numBlocks=3): \"\"\"Creates a Block CED model for segmentation problems", "from keras.initializers import RandomNormal from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose,", "x51 = BatchNormalization()(x51) x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51) x52 = Conv2D(filt_num*dd, (3,", "= len(skip_list) filt_num = int(skip_list[0].shape[-1]) x = model.layers[-3].output # freeze", "for segmentation problems Args: input shape: a list or tuple", "x51 = ELU(name='elu_x51_{}'.format(rr))(x51) x52 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv52_{}'.format(rr))(x51)", "name='DeConv1_{}'.format(dd))(x) if use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1) x3", "compatibility rows, cols = input_shape[0:2] assert rows % 2**numBlocks ==", "size reduction startsize = np.max(input_shape[0:2]) minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks assert", "for layer in model.layers: layer.trainable = False use_bn = True", "skip_list.append(x) # expanding blocks expnums = list(range(1, numBlocks+1)) expnums.reverse() for", "Whether to have a continuous output with linear activation Returns:", "concatenate([skip_list[dd-1], x], name='skip_connect_{}'.format(dd)) x1 = Conv2D(filt_num*dd, (1, 1), padding='same', name='DeConv1_{}'.format(dd))(x)", "number the deeper the model output_chan: number of output channels.", "import ELU, LeakyReLU from keras.models import Model # Parameterized 2D", "= concatenate([skip_list[dd-1], x], name='skip_connect_{}'.format(dd)) x1 = Conv2D(filt_num*dd, (1, 1), padding='same',", "name='ConvClean_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_clean_{}'.format(rr))(x) skip_list.append(x)", "model Example useage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that", "use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3) x51 = Conv2D(filt_num*dd,", "number is multipled linearly increased and decreased throughout the model", "shape compatibility rows, cols = input_shape[0:2] assert rows % 2**numBlocks", "= list(range(1, numBlocks+1)) expnums.reverse() for dd in expnums: if dd", "skip connection layer outputs skip_list = [l.output for l in", "Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv3_{}'.format(rr))(x) if use_bn: x3 = BatchNormalization()(x3)", "= (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks assert minsize > 4, \"Too small of", "ELU(name='elu_Dx52_{}'.format(dd))(x52) x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd)) x = Conv2D(filt_num*dd," ]
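A minimal end-to-end sketch of how these pieces compose, assuming the definitions above live in a module named Models (the module name, input shape, and random stand-in data are assumptions, not part of the original file): pretrain the encoder as a classifier, then graft the unfrozen decoder onto the frozen encoder for segmentation.

import numpy as np
from Models import BlockModel_Classifier, ConvertEncoderToCED

# pretrain the encoder on a classification target (random stand-in data here)
clf = BlockModel_Classifier(input_shape=(256, 256, 1), filt_num=16, numBlocks=3)
clf.compile(optimizer='adam', loss='binary_crossentropy')
x = np.random.rand(8, 256, 256, 1)
y = np.random.randint(0, 2, size=(8, 1))
clf.fit(x, y, epochs=1, verbose=0)

# convert: encoder weights are frozen, decoder layers are new and trainable;
# as the comment in ConvertEncoderToCED notes, the result must be recompiled
seg = ConvertEncoderToCED(clf)
seg.compile(optimizer='adam', loss='binary_crossentropy')
masks = seg.predict(x)  # sigmoid maps with shape (8, 256, 256, 1)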
[ "database_exists, create_database, drop_database from flunkybot.db import engine, base from flunkybot.models", "python \"\"\"Drop and create a new database with schema.\"\"\" from", "database with schema.\"\"\" from sqlalchemy_utils.functions import database_exists, create_database, drop_database from", "import * # noqa db_url = engine.url if database_exists(db_url): drop_database(db_url)", "import database_exists, create_database, drop_database from flunkybot.db import engine, base from", "#!/bin/env python \"\"\"Drop and create a new database with schema.\"\"\"", "new database with schema.\"\"\" from sqlalchemy_utils.functions import database_exists, create_database, drop_database", "sqlalchemy_utils.functions import database_exists, create_database, drop_database from flunkybot.db import engine, base", "schema.\"\"\" from sqlalchemy_utils.functions import database_exists, create_database, drop_database from flunkybot.db import", "flunkybot.models import * # noqa db_url = engine.url if database_exists(db_url):", "from flunkybot.models import * # noqa db_url = engine.url if", "create_database, drop_database from flunkybot.db import engine, base from flunkybot.models import", "drop_database from flunkybot.db import engine, base from flunkybot.models import *", "engine, base from flunkybot.models import * # noqa db_url =", "\"\"\"Drop and create a new database with schema.\"\"\" from sqlalchemy_utils.functions", "flunkybot.db import engine, base from flunkybot.models import * # noqa", "* # noqa db_url = engine.url if database_exists(db_url): drop_database(db_url) create_database(db_url)", "with schema.\"\"\" from sqlalchemy_utils.functions import database_exists, create_database, drop_database from flunkybot.db", "from flunkybot.db import engine, base from flunkybot.models import * #", "base from flunkybot.models import * # noqa db_url = engine.url", "a new database with schema.\"\"\" from sqlalchemy_utils.functions import database_exists, create_database,", "create a new database with schema.\"\"\" from sqlalchemy_utils.functions import database_exists,", "noqa db_url = engine.url if database_exists(db_url): drop_database(db_url) create_database(db_url) base.metadata.drop_all() base.metadata.create_all()", "and create a new database with schema.\"\"\" from sqlalchemy_utils.functions import", "from sqlalchemy_utils.functions import database_exists, create_database, drop_database from flunkybot.db import engine,", "# noqa db_url = engine.url if database_exists(db_url): drop_database(db_url) create_database(db_url) base.metadata.drop_all()", "import engine, base from flunkybot.models import * # noqa db_url" ]
[ "-*- import os from setuptools import find_packages, setup from app", "# -*- coding: utf-8 -*- import os from setuptools import", "f.read().split('\\n') setup( name='webspider', version=__version__, license='MIT', author='heguozhu', author_email='<EMAIL>', description='lagou.com spider', url='<EMAIL>:GuozhuHe/webspider.git',", "entry_points={ 'console_scripts': [ 'web = app.web_app:main', 'production_web = app.quickly_cmd:run_web_app_by_gunicorn', 'crawl_lagou_data", "from app import __version__ # get the dependencies and installs", "'console_scripts': [ 'web = app.web_app:main', 'production_web = app.quickly_cmd:run_web_app_by_gunicorn', 'crawl_lagou_data =", "name='webspider', version=__version__, license='MIT', author='heguozhu', author_email='<EMAIL>', description='lagou.com spider', url='<EMAIL>:GuozhuHe/webspider.git', packages=find_packages(exclude=['tests']), package_data={'webspider':", "'production_web = app.quickly_cmd:run_web_app_by_gunicorn', 'crawl_lagou_data = app.tasks:crawl_lagou_data', 'crawl_jobs_count = app.tasks.jobs_count:crawl_lagou_jobs_count', 'celery_jobs_count_worker", "utf-8 -*- import os from setuptools import find_packages, setup from", "os from setuptools import find_packages, setup from app import __version__", "version=__version__, license='MIT', author='heguozhu', author_email='<EMAIL>', description='lagou.com spider', url='<EMAIL>:GuozhuHe/webspider.git', packages=find_packages(exclude=['tests']), package_data={'webspider': ['README.md']},", "[ 'web = app.web_app:main', 'production_web = app.quickly_cmd:run_web_app_by_gunicorn', 'crawl_lagou_data = app.tasks:crawl_lagou_data',", "os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'requirements.txt')) as f: all_requirements = f.read().split('\\n') setup(", "description='lagou.com spider', url='<EMAIL>:GuozhuHe/webspider.git', packages=find_packages(exclude=['tests']), package_data={'webspider': ['README.md']}, zip_safe=False, install_requires=all_requirements, entry_points={ 'console_scripts':", "= app.tasks:crawl_lagou_data', 'crawl_jobs_count = app.tasks.jobs_count:crawl_lagou_jobs_count', 'celery_jobs_count_worker = app.quickly_cmd:run_celery_jobs_count_worker', 'celery_lagou_data_worker =", "-*- coding: utf-8 -*- import os from setuptools import find_packages,", "'requirements.txt')) as f: all_requirements = f.read().split('\\n') setup( name='webspider', version=__version__, license='MIT',", "app.web_app:main', 'production_web = app.quickly_cmd:run_web_app_by_gunicorn', 'crawl_lagou_data = app.tasks:crawl_lagou_data', 'crawl_jobs_count = app.tasks.jobs_count:crawl_lagou_jobs_count',", "# get the dependencies and installs here = os.path.abspath(os.path.dirname(__file__)) with", "package_data={'webspider': ['README.md']}, zip_safe=False, install_requires=all_requirements, entry_points={ 'console_scripts': [ 'web = app.web_app:main',", "get the dependencies and installs here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here,", "f: all_requirements = f.read().split('\\n') setup( name='webspider', version=__version__, license='MIT', author='heguozhu', author_email='<EMAIL>',", "spider', url='<EMAIL>:GuozhuHe/webspider.git', packages=find_packages(exclude=['tests']), package_data={'webspider': ['README.md']}, zip_safe=False, install_requires=all_requirements, entry_points={ 'console_scripts': [", "import __version__ # get the dependencies and installs here =", "url='<EMAIL>:GuozhuHe/webspider.git', packages=find_packages(exclude=['tests']), 
package_data={'webspider': ['README.md']}, zip_safe=False, install_requires=all_requirements, entry_points={ 'console_scripts': [ 'web", "['README.md']}, zip_safe=False, install_requires=all_requirements, entry_points={ 'console_scripts': [ 'web = app.web_app:main', 'production_web", "__version__ # get the dependencies and installs here = os.path.abspath(os.path.dirname(__file__))", "the dependencies and installs here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'requirements.txt'))", "all_requirements = f.read().split('\\n') setup( name='webspider', version=__version__, license='MIT', author='heguozhu', author_email='<EMAIL>', description='lagou.com", "installs here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'requirements.txt')) as f: all_requirements", "= app.web_app:main', 'production_web = app.quickly_cmd:run_web_app_by_gunicorn', 'crawl_lagou_data = app.tasks:crawl_lagou_data', 'crawl_jobs_count =", "author_email='<EMAIL>', description='lagou.com spider', url='<EMAIL>:GuozhuHe/webspider.git', packages=find_packages(exclude=['tests']), package_data={'webspider': ['README.md']}, zip_safe=False, install_requires=all_requirements, entry_points={", "<filename>setup.py #!/usr/bin/env python # -*- coding: utf-8 -*- import os", "find_packages, setup from app import __version__ # get the dependencies", "dependencies and installs here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'requirements.txt')) as", "with open(os.path.join(here, 'requirements.txt')) as f: all_requirements = f.read().split('\\n') setup( name='webspider',", "= app.quickly_cmd:run_web_app_by_gunicorn', 'crawl_lagou_data = app.tasks:crawl_lagou_data', 'crawl_jobs_count = app.tasks.jobs_count:crawl_lagou_jobs_count', 'celery_jobs_count_worker =", "app.tasks:crawl_lagou_data', 'crawl_jobs_count = app.tasks.jobs_count:crawl_lagou_jobs_count', 'celery_jobs_count_worker = app.quickly_cmd:run_celery_jobs_count_worker', 'celery_lagou_data_worker = app.quickly_cmd:run_celery_lagou_data_worker',", "zip_safe=False, install_requires=all_requirements, entry_points={ 'console_scripts': [ 'web = app.web_app:main', 'production_web =", "= app.quickly_cmd:run_celery_jobs_count_worker', 'celery_lagou_data_worker = app.quickly_cmd:run_celery_lagou_data_worker', 'celery_beat = app.quickly_cmd:run_celery_beat', 'celery_flower =", "setup( name='webspider', version=__version__, license='MIT', author='heguozhu', author_email='<EMAIL>', description='lagou.com spider', url='<EMAIL>:GuozhuHe/webspider.git', packages=find_packages(exclude=['tests']),", "author='heguozhu', author_email='<EMAIL>', description='lagou.com spider', url='<EMAIL>:GuozhuHe/webspider.git', packages=find_packages(exclude=['tests']), package_data={'webspider': ['README.md']}, zip_safe=False, install_requires=all_requirements,", "coding: utf-8 -*- import os from setuptools import find_packages, setup", "#!/usr/bin/env python # -*- coding: utf-8 -*- import os from", "app import __version__ # get the dependencies and installs here", "'crawl_lagou_data = app.tasks:crawl_lagou_data', 'crawl_jobs_count = app.tasks.jobs_count:crawl_lagou_jobs_count', 'celery_jobs_count_worker = app.quickly_cmd:run_celery_jobs_count_worker', 'celery_lagou_data_worker", "'celery_lagou_data_worker = app.quickly_cmd:run_celery_lagou_data_worker', 'celery_beat = app.quickly_cmd:run_celery_beat', 'celery_flower = app.quickly_cmd.py:run_celery_flower', ],", "= app.quickly_cmd:run_celery_lagou_data_worker', 'celery_beat = 
app.quickly_cmd:run_celery_beat', 'celery_flower = app.quickly_cmd.py:run_celery_flower', ], }", "= os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'requirements.txt')) as f: all_requirements = f.read().split('\\n')", "app.quickly_cmd:run_celery_jobs_count_worker', 'celery_lagou_data_worker = app.quickly_cmd:run_celery_lagou_data_worker', 'celery_beat = app.quickly_cmd:run_celery_beat', 'celery_flower = app.quickly_cmd.py:run_celery_flower',", "app.quickly_cmd:run_celery_lagou_data_worker', 'celery_beat = app.quickly_cmd:run_celery_beat', 'celery_flower = app.quickly_cmd.py:run_celery_flower', ], } )", "setup from app import __version__ # get the dependencies and", "app.quickly_cmd:run_web_app_by_gunicorn', 'crawl_lagou_data = app.tasks:crawl_lagou_data', 'crawl_jobs_count = app.tasks.jobs_count:crawl_lagou_jobs_count', 'celery_jobs_count_worker = app.quickly_cmd:run_celery_jobs_count_worker',", "'crawl_jobs_count = app.tasks.jobs_count:crawl_lagou_jobs_count', 'celery_jobs_count_worker = app.quickly_cmd:run_celery_jobs_count_worker', 'celery_lagou_data_worker = app.quickly_cmd:run_celery_lagou_data_worker', 'celery_beat", "app.tasks.jobs_count:crawl_lagou_jobs_count', 'celery_jobs_count_worker = app.quickly_cmd:run_celery_jobs_count_worker', 'celery_lagou_data_worker = app.quickly_cmd:run_celery_lagou_data_worker', 'celery_beat = app.quickly_cmd:run_celery_beat',", "license='MIT', author='heguozhu', author_email='<EMAIL>', description='lagou.com spider', url='<EMAIL>:GuozhuHe/webspider.git', packages=find_packages(exclude=['tests']), package_data={'webspider': ['README.md']}, zip_safe=False,", "open(os.path.join(here, 'requirements.txt')) as f: all_requirements = f.read().split('\\n') setup( name='webspider', version=__version__,", "setuptools import find_packages, setup from app import __version__ # get", "'web = app.web_app:main', 'production_web = app.quickly_cmd:run_web_app_by_gunicorn', 'crawl_lagou_data = app.tasks:crawl_lagou_data', 'crawl_jobs_count", "import os from setuptools import find_packages, setup from app import", "python # -*- coding: utf-8 -*- import os from setuptools", "= app.tasks.jobs_count:crawl_lagou_jobs_count', 'celery_jobs_count_worker = app.quickly_cmd:run_celery_jobs_count_worker', 'celery_lagou_data_worker = app.quickly_cmd:run_celery_lagou_data_worker', 'celery_beat =", "from setuptools import find_packages, setup from app import __version__ #", "as f: all_requirements = f.read().split('\\n') setup( name='webspider', version=__version__, license='MIT', author='heguozhu',", "packages=find_packages(exclude=['tests']), package_data={'webspider': ['README.md']}, zip_safe=False, install_requires=all_requirements, entry_points={ 'console_scripts': [ 'web =", "install_requires=all_requirements, entry_points={ 'console_scripts': [ 'web = app.web_app:main', 'production_web = app.quickly_cmd:run_web_app_by_gunicorn',", "= f.read().split('\\n') setup( name='webspider', version=__version__, license='MIT', author='heguozhu', author_email='<EMAIL>', description='lagou.com spider',", "here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'requirements.txt')) as f: all_requirements =", "'celery_jobs_count_worker = app.quickly_cmd:run_celery_jobs_count_worker', 'celery_lagou_data_worker = app.quickly_cmd:run_celery_lagou_data_worker', 'celery_beat = app.quickly_cmd:run_celery_beat', 'celery_flower", "import find_packages, setup from app import __version__ # get the", "and installs here = 
os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'requirements.txt')) as f:" ]
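Each console_scripts entry maps a command name to a module:callable spec; on install, setuptools generates a small wrapper script per entry. Roughly, the installed `web` command behaves like the following sketch (illustrative only, not the generated file verbatim):

# what 'web = app.web_app:main' amounts to after installation (sketch)
import sys
from app.web_app import main

if __name__ == '__main__':
    sys.exit(main())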
[ "HOWTOs individually latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex',", "the format for a strftime call. today_fmt = '%B %d,", "'macro': (r'^#define ([^_][\\w_]+)\\(.*\\)[\\s|\\\\]'), } # The coverage checker will ignore", "format for a strftime call. today_fmt = '%B %d, %Y'", "document tree into LaTeX files. List of tuples # (source", "directory. exclude_patterns = ['venv/*'] # Options for HTML output #", "_stdauthor, 'howto') for fn in os.listdir('howto') if fn.endswith('.rst') and fn", "the LaTeX preamble. latex_preamble = r''' \\authoraddress{ \\strong{Python Software Foundation}\\\\", "[ '../Include/*.h', ] # Regexes to find C items in", "'python3' # Require Sphinx 1.2 for build. needs_sphinx = '1.2'", "will ignore all C items whose names match these regexes", "Documentation' % release # If not '', a 'Last updated", "'Last updated on:' timestamp is inserted at every page bottom,", "stuff for the LaTeX preamble. latex_preamble = r''' \\authoraddress{ \\strong{Python", "source files. coverage_c_regexes = { 'cfunction': (r'^PyAPI_FUNC\\(.*\\)\\s+([^_][\\w_]+)'), 'data': (r'^PyAPI_DATA\\(.*\\)\\s+([^_][\\w_]+)'), 'macro':", "Foundation' # Options for the coverage checker # -------------------------------- #", "using the given strftime format. html_last_updated_fmt = '%b %d, %Y'", "} # Options for the link checker # ---------------------------- #", "Python/C API', _stdauthor, 'manual'), ('distributing/index', 'distributing.tex', 'Distributing Python Modules', _stdauthor,", "coverage_ignore_modules = [ r'[T|t][k|K]', r'Tix', r'distutils.*', ] coverage_ignore_functions = [", "the link checker # ---------------------------- # Ignore certain URLs. linkcheck_ignore", "Custom sidebar templates, filenames relative to this file. html_sidebars =", "Email: \\email{<EMAIL>} } \\let\\Verbatim=\\OriginalVerbatim \\let\\endVerbatim=\\endOriginalVerbatim ''' # Documents to append", "1.2 for build. needs_sphinx = '1.2' # Ignore any .rst", "LaTeX output # ------------------------ # The paper size ('letter' or", "# Short title used e.g. for <title> HTML tags. html_short_title", "'2001-%s, Python Software Foundation' % time.strftime('%Y') # We look for", "'index': 'indexsidebar.html', } # Additional templates that should be rendered", "} # Additional templates that should be rendered to pages.", "builder. htmlhelp_basename = 'python' + release.replace('.', '') # Split the", "are two options for replacing |today|: either, you set today", "# # This file is execfile()d with the current directory", "Collect all HOWTOs individually latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4]", "= r''' \\authoraddress{ \\strong{Python Software Foundation}\\\\ Email: \\email{<EMAIL>} } \\let\\Verbatim=\\OriginalVerbatim", "find HTML templates. templates_path = ['tools/templates'] # Custom sidebar templates,", "author, document class [howto/manual]). _stdauthor = r'<NAME>\\\\and the Python development", "= patchlevel.get_version_info() # There are two options for replacing |today|:", "\\authoraddress{ \\strong{Python Software Foundation}\\\\ Email: \\email{<EMAIL>} } \\let\\Verbatim=\\OriginalVerbatim \\let\\endVerbatim=\\endOriginalVerbatim '''", "imports are okay, they're removed automatically). import sys, os, time", "coverage_c_regexes = { 'cfunction': (r'^PyAPI_FUNC\\(.*\\)\\s+([^_][\\w_]+)'), 'data': (r'^PyAPI_DATA\\(.*\\)\\s+([^_][\\w_]+)'), 'macro': (r'^#define ([^_][\\w_]+)\\(.*\\)[\\s|\\\\]'),", "'12pt'). 
latex_font_size = '10pt' # Grouping the document tree into", "tree into LaTeX files. List of tuples # (source start", "value, then it is used: today = '' # Else,", "source files for C API coverage, relative to this directory.", "current Python source tree # and replace the values accordingly.", "format. html_last_updated_fmt = '%b %d, %Y' # Path to find", "Python Modules', _stdauthor, 'manual'), ('library/index', 'library.tex', 'The Python Library Reference',", "substitutions. project = 'Python' copyright = '2001-%s, Python Software Foundation'", "and Embedding Python', _stdauthor, 'manual'), ('installing/index', 'installing.tex', 'Installing Python Modules',", "output # ----------------------- epub_author = 'Python Documentation Authors' epub_publisher =", "same as in coverage_c_regexes. coverage_ignore_c_items = { # 'cfunction': [...]", "'tutorial.tex', 'Python Tutorial', _stdauthor, 'manual'), ('using/index', 'using.tex', 'Python Setup and", "fn[:-4] + '.tex', '', _stdauthor, 'howto') for fn in os.listdir('howto')", "_stdauthor, 'manual'), ('faq/index', 'faq.tex', 'Python Frequently Asked Questions', _stdauthor, 'manual'),", "} \\let\\Verbatim=\\OriginalVerbatim \\let\\endVerbatim=\\endOriginalVerbatim ''' # Documents to append as an", "Library Reference', _stdauthor, 'manual'), ('reference/index', 'reference.tex', 'The Python Language Reference',", "_stdauthor, 'manual'), ('whatsnew/' + version, 'whatsnew.tex', 'What\\'s New in Python',", "coverage_ignore_classes = [ ] # Glob patterns for C source", "handle Unicode correctly latex_elements = {'inputenc': r'\\usepackage[utf8x]{inputenc}', 'utf8extra': ''} #", "By default, highlight as Python 3. highlight_language = 'python3' #", "title used e.g. for <title> HTML tags. html_short_title = '%s", "font size ('10pt', '11pt' or '12pt'). latex_font_size = '10pt' #", "for replacing |today|: either, you set today to some #", "Python 3. highlight_language = 'python3' # Require Sphinx 1.2 for", "# Options for HTML output # ----------------------- # Use our", "[r'https://bugs.python.org/(issue)?\\d+', # Ignore PEPs for now, they all have permanent", "Modules', _stdauthor, 'manual'), ('extending/index', 'extending.tex', 'Extending and Embedding Python', _stdauthor,", "custom theme. html_theme = 'pydoctheme' html_theme_path = ['tools'] html_theme_options =", "'Installing Python Modules', _stdauthor, 'manual'), ('library/index', 'library.tex', 'The Python Library", "''' # Documents to append as an appendix to all", "'manual'), ('whatsnew/' + version, 'whatsnew.tex', 'What\\'s New in Python', '<NAME>',", "'' # Else, today_fmt is used as the format for", "for <title> HTML tags. html_short_title = '%s Documentation' % release", "['sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific', 'c_annotations'] # General substitutions. project = 'Python'", "containing dir. # # The contents of this file are", "The font size ('10pt', '11pt' or '12pt'). latex_font_size = '10pt'", "# Options for the coverage checker # -------------------------------- # The", "files. html_static_path = ['tools/static'] # Output file base name for", "URLs. 
linkcheck_ignore = [r'https://bugs.python.org/(issue)?\\d+', # Ignore PEPs for now, they", "put values in the namespace # that aren't pickleable (module", "= 'Python Documentation Authors' epub_publisher = 'Python Software Foundation' #", "= True # Options for LaTeX output # ------------------------ #", "configuration file # # This file is execfile()d with the", "= '1.2' # Ignore any .rst files in the venv/", "[ ('c-api/index', 'c-api.tex', 'The Python/C API', _stdauthor, 'manual'), ('distributing/index', 'distributing.tex',", "'cfunction': (r'^PyAPI_FUNC\\(.*\\)\\s+([^_][\\w_]+)'), 'data': (r'^PyAPI_DATA\\(.*\\)\\s+([^_][\\w_]+)'), 'macro': (r'^#define ([^_][\\w_]+)\\(.*\\)[\\s|\\\\]'), } # The", "= '%B %d, %Y' # By default, highlight as Python", "'test($|_)', ] coverage_ignore_classes = [ ] # Glob patterns for", "'The Python Language Reference', _stdauthor, 'manual'), ('tutorial/index', 'tutorial.tex', 'Python Tutorial',", "'index': 'indexcontent.html', } # Output an OpenSearch description file. html_use_opensearch", "will ignore all modules/functions/classes whose names # match any of", "'The Python/C API', _stdauthor, 'manual'), ('distributing/index', 'distributing.tex', 'Distributing Python Modules',", "checker # ---------------------------- # Ignore certain URLs. linkcheck_ignore = [r'https://bugs.python.org/(issue)?\\d+',", "True} # Short title used e.g. for <title> HTML tags.", "patchlevel.get_version_info() # There are two options for replacing |today|: either,", "Ignore PEPs for now, they all have permanent redirects. r'http://www.python.org/dev/peps/pep-\\d+']", "documentation build configuration file # # This file is execfile()d", "regexes (using re.match). coverage_ignore_modules = [ r'[T|t][k|K]', r'Tix', r'distutils.*', ]", "Python', _stdauthor, 'manual'), ('installing/index', 'installing.tex', 'Installing Python Modules', _stdauthor, 'manual'),", "# Regexes to find C items in the source files.", "%Y' # Path to find HTML templates. templates_path = ['tools/templates']", "be rendered to pages. html_additional_pages = { 'download': 'download.html', 'index':", "latex_documents = [ ('c-api/index', 'c-api.tex', 'The Python/C API', _stdauthor, 'manual'),", "exclude_patterns = ['venv/*'] # Options for HTML output # -----------------------", "help builder. htmlhelp_basename = 'python' + release.replace('.', '') # Split", "LaTeX preamble. latex_preamble = r''' \\authoraddress{ \\strong{Python Software Foundation}\\\\ Email:", "sidebar templates, filenames relative to this file. html_sidebars = {", "# --------------------- extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific', 'c_annotations'] # General", "files in the venv/ directory. exclude_patterns = ['venv/*'] # Options", "(module imports are okay, they're removed automatically). import sys, os,", "the given strftime format. html_last_updated_fmt = '%b %d, %Y' #", "# Output an OpenSearch description file. html_use_opensearch = 'https://docs.python.org/' +", "r'http://www.python.org/dev/peps/pep-\\d+'] # Options for extensions # ---------------------- # Relative filename", "at every page bottom, # using the given strftime format.", "Options for LaTeX output # ------------------------ # The paper size", "coverage checker will ignore all C items whose names match", "current directory set to its containing dir. # # The", "and replace the values accordingly. 
import patchlevel version, release =", "] # Collect all HOWTOs individually latex_documents.extend(('howto/' + fn[:-4], 'howto-'", "# We look for the Include/patchlevel.h file in the current", "'manual'), ('extending/index', 'extending.tex', 'Extending and Embedding Python', _stdauthor, 'manual'), ('installing/index',", "fn in os.listdir('howto') if fn.endswith('.rst') and fn != 'index.rst') #", "# Additional stuff for the LaTeX preamble. latex_preamble = r'''", "Output file base name for HTML help builder. htmlhelp_basename =", "Python Language Reference', _stdauthor, 'manual'), ('tutorial/index', 'tutorial.tex', 'Python Tutorial', _stdauthor,", "file base name for HTML help builder. htmlhelp_basename = 'python'", "Frequently Asked Questions', _stdauthor, 'manual'), ('whatsnew/' + version, 'whatsnew.tex', 'What\\'s", "the document tree into LaTeX files. List of tuples #", "our custom theme. html_theme = 'pydoctheme' html_theme_path = ['tools'] html_theme_options", "= ['sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific', 'c_annotations'] # General substitutions. project =", "of tuples # (source start file, target name, title, author,", "release = patchlevel.get_version_info() # There are two options for replacing", "for build. needs_sphinx = '1.2' # Ignore any .rst files", "_stdauthor, 'manual'), ('reference/index', 'reference.tex', 'The Python Language Reference', _stdauthor, 'manual'),", "\\email{<EMAIL>} } \\let\\Verbatim=\\OriginalVerbatim \\let\\endVerbatim=\\endOriginalVerbatim ''' # Documents to append as", "tree # and replace the values accordingly. import patchlevel version,", "# There are two options for replacing |today|: either, you", "call. today_fmt = '%B %d, %Y' # By default, highlight", "certain URLs. linkcheck_ignore = [r'https://bugs.python.org/(issue)?\\d+', # Ignore PEPs for now,", "Software Foundation}\\\\ Email: \\email{<EMAIL>} } \\let\\Verbatim=\\OriginalVerbatim \\let\\endVerbatim=\\endOriginalVerbatim ''' # Documents", "(using re.match). coverage_ignore_modules = [ r'[T|t][k|K]', r'Tix', r'distutils.*', ] coverage_ignore_functions", "to this file. html_sidebars = { 'index': 'indexsidebar.html', } #", "['tools/static'] # Output file base name for HTML help builder.", "'1.2' # Ignore any .rst files in the venv/ directory.", "# By default, highlight as Python 3. highlight_language = 'python3'", "'manual'), ('tutorial/index', 'tutorial.tex', 'Python Tutorial', _stdauthor, 'manual'), ('using/index', 'using.tex', 'Python", "Setup and Usage', _stdauthor, 'manual'), ('faq/index', 'faq.tex', 'Python Frequently Asked", "'cfunction': [...] } # Options for the link checker #", "Options for HTML output # ----------------------- # Use our custom", "files. coverage_c_regexes = { 'cfunction': (r'^PyAPI_FUNC\\(.*\\)\\s+([^_][\\w_]+)'), 'data': (r'^PyAPI_DATA\\(.*\\)\\s+([^_][\\w_]+)'), 'macro': (r'^#define", "execfile()d with the current directory set to its containing dir.", "# Grouping the document tree into LaTeX files. List of", "then it is used: today = '' # Else, today_fmt", "ignore all C items whose names match these regexes #", "redirects. 
r'http://www.python.org/dev/peps/pep-\\d+'] # Options for extensions # ---------------------- # Relative", "file # # This file is execfile()d with the current", "Tutorial', _stdauthor, 'manual'), ('using/index', 'using.tex', 'Python Setup and Usage', _stdauthor,", "+ '.tex', '', _stdauthor, 'howto') for fn in os.listdir('howto') if", "= '' # Else, today_fmt is used as the format", "html_additional_pages = { 'download': 'download.html', 'index': 'indexcontent.html', } # Output", "values accordingly. import patchlevel version, release = patchlevel.get_version_info() # There", "all HOWTOs individually latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] +", "updated on:' timestamp is inserted at every page bottom, #", "_stdauthor = r'<NAME>\\\\and the Python development team' latex_documents = [", "Options for the coverage checker # -------------------------------- # The coverage", "set today to some # non-false value, then it is", "'indexsidebar.html', } # Additional templates that should be rendered to", "= 'Python Software Foundation' # Options for the coverage checker", "file is execfile()d with the current directory set to its", "checker will ignore all C items whose names match these", "html_short_title = '%s Documentation' % release # If not '',", "Split the index html_split_index = True # Options for LaTeX", "base name for HTML help builder. htmlhelp_basename = 'python' +", "Python documentation build configuration file # # This file is", "Additional static files. html_static_path = ['tools/static'] # Output file base", "'manual'), ('library/index', 'library.tex', 'The Python Library Reference', _stdauthor, 'manual'), ('reference/index',", "the current directory set to its containing dir. # #", "'library.tex', 'The Python Library Reference', _stdauthor, 'manual'), ('reference/index', 'reference.tex', 'The", "'installing.tex', 'Installing Python Modules', _stdauthor, 'manual'), ('library/index', 'library.tex', 'The Python", "pickleable (module imports are okay, they're removed automatically). import sys,", "patterns for C source files for C API coverage, relative", "link checker # ---------------------------- # Ignore certain URLs. linkcheck_ignore =", "'manual'), ('using/index', 'using.tex', 'Python Setup and Usage', _stdauthor, 'manual'), ('faq/index',", "if fn.endswith('.rst') and fn != 'index.rst') # Additional stuff for", "-- the keys must be the same as in coverage_c_regexes.", "this file are pickled, so don't put values in the", "with the current directory set to its containing dir. #", "= '2001-%s, Python Software Foundation' % time.strftime('%Y') # We look", "data file. refcount_file = 'data/refcounts.dat' # Translation # ----------- gettext_compact", "'Extending and Embedding Python', _stdauthor, 'manual'), ('installing/index', 'installing.tex', 'Installing Python", "in the namespace # that aren't pickleable (module imports are", "of this file are pickled, so don't put values in", "'indexcontent.html', } # Output an OpenSearch description file. html_use_opensearch =", "Python Modules', _stdauthor, 'manual'), ('extending/index', 'extending.tex', 'Extending and Embedding Python',", "'howto') for fn in os.listdir('howto') if fn.endswith('.rst') and fn !=", "theme. html_theme = 'pydoctheme' html_theme_path = ['tools'] html_theme_options = {'collapsiblesidebar':", "Software Foundation' % time.strftime('%Y') # We look for the Include/patchlevel.h", "# (using re.match) -- the keys must be the same", "they all have permanent redirects. 
r'http://www.python.org/dev/peps/pep-\\d+'] # Options for extensions", "in coverage_c_regexes. coverage_ignore_c_items = { # 'cfunction': [...] } #", "{ 'cfunction': (r'^PyAPI_FUNC\\(.*\\)\\s+([^_][\\w_]+)'), 'data': (r'^PyAPI_DATA\\(.*\\)\\s+([^_][\\w_]+)'), 'macro': (r'^#define ([^_][\\w_]+)\\(.*\\)[\\s|\\\\]'), } #", "We look for the Include/patchlevel.h file in the current Python", "# The coverage checker will ignore all C items whose", "HTML tags. html_short_title = '%s Documentation' % release # If", "OpenSearch description file. html_use_opensearch = 'https://docs.python.org/' + version # Additional", "Authors' epub_publisher = 'Python Software Foundation' # Options for the", "[ 'test($|_)', ] coverage_ignore_classes = [ ] # Glob patterns", "for the coverage checker # -------------------------------- # The coverage checker", "is execfile()d with the current directory set to its containing", "Python source tree # and replace the values accordingly. import", "C items whose names match these regexes # (using re.match)", "non-false value, then it is used: today = '' #", "index html_split_index = True # Options for LaTeX output #", "'using.tex', 'Python Setup and Usage', _stdauthor, 'manual'), ('faq/index', 'faq.tex', 'Python", "HTML output # ----------------------- # Use our custom theme. html_theme", "should be rendered to pages. html_additional_pages = { 'download': 'download.html',", "correctly latex_elements = {'inputenc': r'\\usepackage[utf8x]{inputenc}', 'utf8extra': ''} # Options for", "and fn != 'index.rst') # Additional stuff for the LaTeX", "so don't put values in the namespace # that aren't", "is inserted at every page bottom, # using the given", "'c_annotations'] # General substitutions. project = 'Python' copyright = '2001-%s,", "or '12pt'). latex_font_size = '10pt' # Grouping the document tree", "import sys, os, time sys.path.append(os.path.abspath('tools/extensions')) # General configuration # ---------------------", "+ fn[:-4], 'howto-' + fn[:-4] + '.tex', '', _stdauthor, 'howto')", "file. html_use_opensearch = 'https://docs.python.org/' + version # Additional static files.", "files for C API coverage, relative to this directory. coverage_c_path", "\\strong{Python Software Foundation}\\\\ Email: \\email{<EMAIL>} } \\let\\Verbatim=\\OriginalVerbatim \\let\\endVerbatim=\\endOriginalVerbatim ''' #", "html_last_updated_fmt = '%b %d, %Y' # Path to find HTML", "now, they all have permanent redirects. r'http://www.python.org/dev/peps/pep-\\d+'] # Options for", "\\let\\Verbatim=\\OriginalVerbatim \\let\\endVerbatim=\\endOriginalVerbatim ''' # Documents to append as an appendix", "templates. templates_path = ['tools/templates'] # Custom sidebar templates, filenames relative", "Include/patchlevel.h file in the current Python source tree # and", "reference count data file. refcount_file = 'data/refcounts.dat' # Translation #", "that should be rendered to pages. html_additional_pages = { 'download':", "this directory. coverage_c_path = [ '../Include/*.h', ] # Regexes to", "('library/index', 'library.tex', 'The Python Library Reference', _stdauthor, 'manual'), ('reference/index', 'reference.tex',", "'pyspecific', 'c_annotations'] # General substitutions. project = 'Python' copyright =", "'https://docs.python.org/' + version # Additional static files. html_static_path = ['tools/static']", "name, title, author, document class [howto/manual]). 
_stdauthor = r'<NAME>\\\\and the", "('installing/index', 'installing.tex', 'Installing Python Modules', _stdauthor, 'manual'), ('library/index', 'library.tex', 'The", "page bottom, # using the given strftime format. html_last_updated_fmt =", "-------------------------------- # The coverage checker will ignore all modules/functions/classes whose", "General substitutions. project = 'Python' copyright = '2001-%s, Python Software", "'manual'), ('installing/index', 'installing.tex', 'Installing Python Modules', _stdauthor, 'manual'), ('library/index', 'library.tex',", "name for HTML help builder. htmlhelp_basename = 'python' + release.replace('.',", "Asked Questions', _stdauthor, 'manual'), ('whatsnew/' + version, 'whatsnew.tex', 'What\\'s New", "epub_publisher = 'Python Software Foundation' # Options for the coverage", "Python', '<NAME>', 'howto'), ] # Collect all HOWTOs individually latex_documents.extend(('howto/'", "The coverage checker will ignore all C items whose names", "|today|: either, you set today to some # non-false value,", "r'distutils.*', ] coverage_ignore_functions = [ 'test($|_)', ] coverage_ignore_classes = [", "C API coverage, relative to this directory. coverage_c_path = [", "Relative filename of the reference count data file. refcount_file =", "= '10pt' # Grouping the document tree into LaTeX files.", "'data/refcounts.dat' # Translation # ----------- gettext_compact = False locale_dirs =", "'python' + release.replace('.', '') # Split the index html_split_index =", "latex_paper_size = 'a4' # The font size ('10pt', '11pt' or", "List of tuples # (source start file, target name, title,", "default, highlight as Python 3. highlight_language = 'python3' # Require", "Ignore any .rst files in the venv/ directory. exclude_patterns =", "os, time sys.path.append(os.path.abspath('tools/extensions')) # General configuration # --------------------- extensions =", "start file, target name, title, author, document class [howto/manual]). _stdauthor", "the venv/ directory. exclude_patterns = ['venv/*'] # Options for HTML", "# ------------------------ # The paper size ('letter' or 'a4'). latex_paper_size", "are okay, they're removed automatically). import sys, os, time sys.path.append(os.path.abspath('tools/extensions'))", "(r'^#define ([^_][\\w_]+)\\(.*\\)[\\s|\\\\]'), } # The coverage checker will ignore all", "used e.g. for <title> HTML tags. html_short_title = '%s Documentation'", "= { # 'cfunction': [...] } # Options for the", "# Options for Epub output # ----------------------- epub_author = 'Python", "# Collect all HOWTOs individually latex_documents.extend(('howto/' + fn[:-4], 'howto-' +", "# Ignore certain URLs. linkcheck_ignore = [r'https://bugs.python.org/(issue)?\\d+', # Ignore PEPs", "re.match) -- the keys must be the same as in", "replacing |today|: either, you set today to some # non-false", "coverage_ignore_functions = [ 'test($|_)', ] coverage_ignore_classes = [ ] #", "fn != 'index.rst') # Additional stuff for the LaTeX preamble.", "in the venv/ directory. exclude_patterns = ['venv/*'] # Options for", "Reference', _stdauthor, 'manual'), ('tutorial/index', 'tutorial.tex', 'Python Tutorial', _stdauthor, 'manual'), ('using/index',", "all C items whose names match these regexes # (using", "'extending.tex', 'Extending and Embedding Python', _stdauthor, 'manual'), ('installing/index', 'installing.tex', 'Installing", "Embedding Python', _stdauthor, 'manual'), ('installing/index', 'installing.tex', 'Installing Python Modules', _stdauthor,", "find C items in the source files. 
coverage_c_regexes = {", "extensions # ---------------------- # Relative filename of the reference count", "Use our custom theme. html_theme = 'pydoctheme' html_theme_path = ['tools']", "= 'data/refcounts.dat' # Translation # ----------- gettext_compact = False locale_dirs", "Python Software Foundation' % time.strftime('%Y') # We look for the", "html_theme_path = ['tools'] html_theme_options = {'collapsiblesidebar': True} # Short title", "in the source files. coverage_c_regexes = { 'cfunction': (r'^PyAPI_FUNC\\(.*\\)\\s+([^_][\\w_]+)'), 'data':", "'%B %d, %Y' # By default, highlight as Python 3.", "('using/index', 'using.tex', 'Python Setup and Usage', _stdauthor, 'manual'), ('faq/index', 'faq.tex',", "as in coverage_c_regexes. coverage_ignore_c_items = { # 'cfunction': [...] }", "into LaTeX files. List of tuples # (source start file,", "as Python 3. highlight_language = 'python3' # Require Sphinx 1.2", "C items in the source files. coverage_c_regexes = { 'cfunction':", "# This file is execfile()d with the current directory set", "r'Tix', r'distutils.*', ] coverage_ignore_functions = [ 'test($|_)', ] coverage_ignore_classes =", "!= 'index.rst') # Additional stuff for the LaTeX preamble. latex_preamble", "team' latex_documents = [ ('c-api/index', 'c-api.tex', 'The Python/C API', _stdauthor,", "sys, os, time sys.path.append(os.path.abspath('tools/extensions')) # General configuration # --------------------- extensions", "strftime format. html_last_updated_fmt = '%b %d, %Y' # Path to", "the keys must be the same as in coverage_c_regexes. coverage_ignore_c_items", "e.g. for <title> HTML tags. html_short_title = '%s Documentation' %", "# Additional templates that should be rendered to pages. html_additional_pages", "used: today = '' # Else, today_fmt is used as", "development team' latex_documents = [ ('c-api/index', 'c-api.tex', 'The Python/C API',", "---------------------------- # Ignore certain URLs. linkcheck_ignore = [r'https://bugs.python.org/(issue)?\\d+', # Ignore", "its containing dir. # # The contents of this file", "latex_font_size = '10pt' # Grouping the document tree into LaTeX", "{'collapsiblesidebar': True} # Short title used e.g. for <title> HTML", "checker # -------------------------------- # The coverage checker will ignore all", "Software Foundation' # Options for the coverage checker # --------------------------------", "<reponame>python-doc-tw/cpython-tw # # Python documentation build configuration file # #", "configuration # --------------------- extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific', 'c_annotations'] #", "('letter' or 'a4'). latex_paper_size = 'a4' # The font size", "Options for Epub output # ----------------------- epub_author = 'Python Documentation", "('whatsnew/' + version, 'whatsnew.tex', 'What\\'s New in Python', '<NAME>', 'howto'),", "in Python', '<NAME>', 'howto'), ] # Collect all HOWTOs individually", "# Additional static files. html_static_path = ['tools/static'] # Output file", "API coverage, relative to this directory. coverage_c_path = [ '../Include/*.h',", "r'\\usepackage[utf8x]{inputenc}', 'utf8extra': ''} # Options for Epub output # -----------------------", "whose names # match any of the following regexes (using", "match these regexes # (using re.match) -- the keys must", "HTML templates. templates_path = ['tools/templates'] # Custom sidebar templates, filenames", "} # Output an OpenSearch description file. 
html_use_opensearch = 'https://docs.python.org/'", "'../Include/*.h', ] # Regexes to find C items in the", "options for replacing |today|: either, you set today to some", "appendix to all manuals. latex_appendices = ['glossary', 'about', 'license', 'copyright']", "# General substitutions. project = 'Python' copyright = '2001-%s, Python", "# Ignore PEPs for now, they all have permanent redirects.", "= 'https://docs.python.org/' + version # Additional static files. html_static_path =", "Glob patterns for C source files for C API coverage,", "3. highlight_language = 'python3' # Require Sphinx 1.2 for build.", "= '%b %d, %Y' # Path to find HTML templates.", "{'inputenc': r'\\usepackage[utf8x]{inputenc}', 'utf8extra': ''} # Options for Epub output #", "HTML help builder. htmlhelp_basename = 'python' + release.replace('.', '') #", "file in the current Python source tree # and replace", "# and replace the values accordingly. import patchlevel version, release", "'whatsnew.tex', 'What\\'s New in Python', '<NAME>', 'howto'), ] # Collect", "html_theme_options = {'collapsiblesidebar': True} # Short title used e.g. for", "latex_preamble = r''' \\authoraddress{ \\strong{Python Software Foundation}\\\\ Email: \\email{<EMAIL>} }", "{ 'download': 'download.html', 'index': 'indexcontent.html', } # Output an OpenSearch", "LaTeX to handle Unicode correctly latex_elements = {'inputenc': r'\\usepackage[utf8x]{inputenc}', 'utf8extra':", ".rst files in the venv/ directory. exclude_patterns = ['venv/*'] #", "# The contents of this file are pickled, so don't", "# Ignore any .rst files in the venv/ directory. exclude_patterns", "= [ 'test($|_)', ] coverage_ignore_classes = [ ] # Glob", "# Relative filename of the reference count data file. refcount_file", "to pages. html_additional_pages = { 'download': 'download.html', 'index': 'indexcontent.html', }", "} # The coverage checker will ignore all C items", "'manual'), ('reference/index', 'reference.tex', 'The Python Language Reference', _stdauthor, 'manual'), ('tutorial/index',", "the source files. coverage_c_regexes = { 'cfunction': (r'^PyAPI_FUNC\\(.*\\)\\s+([^_][\\w_]+)'), 'data': (r'^PyAPI_DATA\\(.*\\)\\s+([^_][\\w_]+)'),", "'sphinx.ext.doctest', 'pyspecific', 'c_annotations'] # General substitutions. project = 'Python' copyright", "'.tex', '', _stdauthor, 'howto') for fn in os.listdir('howto') if fn.endswith('.rst')", "'howto-' + fn[:-4] + '.tex', '', _stdauthor, 'howto') for fn", "= 'a4' # The font size ('10pt', '11pt' or '12pt').", "output # ----------------------- # Use our custom theme. html_theme =", "'about', 'license', 'copyright'] # Get LaTeX to handle Unicode correctly", "New in Python', '<NAME>', 'howto'), ] # Collect all HOWTOs", "[ ] # Glob patterns for C source files for", "= ['tools/templates'] # Custom sidebar templates, filenames relative to this", "+ version, 'whatsnew.tex', 'What\\'s New in Python', '<NAME>', 'howto'), ]", "# Path to find HTML templates. templates_path = ['tools/templates'] #", "be the same as in coverage_c_regexes. coverage_ignore_c_items = { #", "for C API coverage, relative to this directory. coverage_c_path =", "[...] } # Options for the link checker # ----------------------------", "'c-api.tex', 'The Python/C API', _stdauthor, 'manual'), ('distributing/index', 'distributing.tex', 'Distributing Python", "(source start file, target name, title, author, document class [howto/manual]).", "Documents to append as an appendix to all manuals. 
latex_appendices", "latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex', '', _stdauthor,", "'copyright'] # Get LaTeX to handle Unicode correctly latex_elements =", "the index html_split_index = True # Options for LaTeX output", "Sphinx 1.2 for build. needs_sphinx = '1.2' # Ignore any", "'pydoctheme' html_theme_path = ['tools'] html_theme_options = {'collapsiblesidebar': True} # Short", "# ----------------------- epub_author = 'Python Documentation Authors' epub_publisher = 'Python", "# (source start file, target name, title, author, document class", "for the link checker # ---------------------------- # Ignore certain URLs.", "today_fmt is used as the format for a strftime call.", "for a strftime call. today_fmt = '%B %d, %Y' #", "project = 'Python' copyright = '2001-%s, Python Software Foundation' %", "+ fn[:-4] + '.tex', '', _stdauthor, 'howto') for fn in", "_stdauthor, 'manual'), ('using/index', 'using.tex', 'Python Setup and Usage', _stdauthor, 'manual'),", "source tree # and replace the values accordingly. import patchlevel", "= [ r'[T|t][k|K]', r'Tix', r'distutils.*', ] coverage_ignore_functions = [ 'test($|_)',", "html_static_path = ['tools/static'] # Output file base name for HTML", "whose names match these regexes # (using re.match) -- the", "version, 'whatsnew.tex', 'What\\'s New in Python', '<NAME>', 'howto'), ] #", "= [r'https://bugs.python.org/(issue)?\\d+', # Ignore PEPs for now, they all have", "'reference.tex', 'The Python Language Reference', _stdauthor, 'manual'), ('tutorial/index', 'tutorial.tex', 'Python", "--------------------- extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific', 'c_annotations'] # General substitutions.", "%d, %Y' # Path to find HTML templates. templates_path =", "have permanent redirects. r'http://www.python.org/dev/peps/pep-\\d+'] # Options for extensions # ----------------------", "description file. html_use_opensearch = 'https://docs.python.org/' + version # Additional static", "epub_author = 'Python Documentation Authors' epub_publisher = 'Python Software Foundation'", "to all manuals. latex_appendices = ['glossary', 'about', 'license', 'copyright'] #", "LaTeX files. List of tuples # (source start file, target", "for HTML output # ----------------------- # Use our custom theme.", "for C source files for C API coverage, relative to", "any of the following regexes (using re.match). coverage_ignore_modules = [", "for HTML help builder. htmlhelp_basename = 'python' + release.replace('.', '')", "any .rst files in the venv/ directory. exclude_patterns = ['venv/*']", "document class [howto/manual]). _stdauthor = r'<NAME>\\\\and the Python development team'", "'Python Software Foundation' # Options for the coverage checker #", "= ['venv/*'] # Options for HTML output # ----------------------- #", "the current Python source tree # and replace the values", "= { 'download': 'download.html', 'index': 'indexcontent.html', } # Output an", "True # Options for LaTeX output # ------------------------ # The", "match any of the following regexes (using re.match). coverage_ignore_modules =", "+ version # Additional static files. html_static_path = ['tools/static'] #", "'faq.tex', 'Python Frequently Asked Questions', _stdauthor, 'manual'), ('whatsnew/' + version,", "every page bottom, # using the given strftime format. 
html_last_updated_fmt", "and Usage', _stdauthor, 'manual'), ('faq/index', 'faq.tex', 'Python Frequently Asked Questions',", "pickled, so don't put values in the namespace # that", "html_use_opensearch = 'https://docs.python.org/' + version # Additional static files. html_static_path", "[howto/manual]). _stdauthor = r'<NAME>\\\\and the Python development team' latex_documents =", "''} # Options for Epub output # ----------------------- epub_author =", "permanent redirects. r'http://www.python.org/dev/peps/pep-\\d+'] # Options for extensions # ---------------------- #", "namespace # that aren't pickleable (module imports are okay, they're", "not '', a 'Last updated on:' timestamp is inserted at", "all have permanent redirects. r'http://www.python.org/dev/peps/pep-\\d+'] # Options for extensions #", "highlight as Python 3. highlight_language = 'python3' # Require Sphinx", "these regexes # (using re.match) -- the keys must be", "this file. html_sidebars = { 'index': 'indexsidebar.html', } # Additional", "re.match). coverage_ignore_modules = [ r'[T|t][k|K]', r'Tix', r'distutils.*', ] coverage_ignore_functions =", "accordingly. import patchlevel version, release = patchlevel.get_version_info() # There are", "to some # non-false value, then it is used: today", "# ---------------------- # Relative filename of the reference count data", "('10pt', '11pt' or '12pt'). latex_font_size = '10pt' # Grouping the", "The paper size ('letter' or 'a4'). latex_paper_size = 'a4' #", "# Options for extensions # ---------------------- # Relative filename of", "# -------------------------------- # The coverage checker will ignore all modules/functions/classes", "Short title used e.g. for <title> HTML tags. html_short_title =", "'Python Setup and Usage', _stdauthor, 'manual'), ('faq/index', 'faq.tex', 'Python Frequently", "Grouping the document tree into LaTeX files. List of tuples", "= [ ] # Glob patterns for C source files", "coverage, relative to this directory. coverage_c_path = [ '../Include/*.h', ]", "= 'Python' copyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')", "directory set to its containing dir. # # The contents", "'Distributing Python Modules', _stdauthor, 'manual'), ('extending/index', 'extending.tex', 'Extending and Embedding", "an OpenSearch description file. html_use_opensearch = 'https://docs.python.org/' + version #", "The contents of this file are pickled, so don't put", "tags. html_short_title = '%s Documentation' % release # If not", "('c-api/index', 'c-api.tex', 'The Python/C API', _stdauthor, 'manual'), ('distributing/index', 'distributing.tex', 'Distributing", "('extending/index', 'extending.tex', 'Extending and Embedding Python', _stdauthor, 'manual'), ('installing/index', 'installing.tex',", "file. html_sidebars = { 'index': 'indexsidebar.html', } # Additional templates", "# Options for the link checker # ---------------------------- # Ignore", "is used: today = '' # Else, today_fmt is used", "templates_path = ['tools/templates'] # Custom sidebar templates, filenames relative to", "templates that should be rendered to pages. html_additional_pages = {", "look for the Include/patchlevel.h file in the current Python source", "['tools'] html_theme_options = {'collapsiblesidebar': True} # Short title used e.g.", "= {'collapsiblesidebar': True} # Short title used e.g. for <title>", "Questions', _stdauthor, 'manual'), ('whatsnew/' + version, 'whatsnew.tex', 'What\\'s New in", "Regexes to find C items in the source files. 
coverage_c_regexes", "release.replace('.', '') # Split the index html_split_index = True #", "# that aren't pickleable (module imports are okay, they're removed", "'manual'), ('faq/index', 'faq.tex', 'Python Frequently Asked Questions', _stdauthor, 'manual'), ('whatsnew/'", "html_split_index = True # Options for LaTeX output # ------------------------", "the Include/patchlevel.h file in the current Python source tree #", "Unicode correctly latex_elements = {'inputenc': r'\\usepackage[utf8x]{inputenc}', 'utf8extra': ''} # Options", "the reference count data file. refcount_file = 'data/refcounts.dat' # Translation", "names # match any of the following regexes (using re.match).", "for the Include/patchlevel.h file in the current Python source tree", "copyright = '2001-%s, Python Software Foundation' % time.strftime('%Y') # We", "linkcheck_ignore = [r'https://bugs.python.org/(issue)?\\d+', # Ignore PEPs for now, they all", "items whose names match these regexes # (using re.match) --", "of the following regexes (using re.match). coverage_ignore_modules = [ r'[T|t][k|K]',", "size ('10pt', '11pt' or '12pt'). latex_font_size = '10pt' # Grouping", "(r'^PyAPI_FUNC\\(.*\\)\\s+([^_][\\w_]+)'), 'data': (r'^PyAPI_DATA\\(.*\\)\\s+([^_][\\w_]+)'), 'macro': (r'^#define ([^_][\\w_]+)\\(.*\\)[\\s|\\\\]'), } # The coverage", "_stdauthor, 'manual'), ('library/index', 'library.tex', 'The Python Library Reference', _stdauthor, 'manual'),", "# Require Sphinx 1.2 for build. needs_sphinx = '1.2' #", "Path to find HTML templates. templates_path = ['tools/templates'] # Custom", "'a4' # The font size ('10pt', '11pt' or '12pt'). latex_font_size", "the Python development team' latex_documents = [ ('c-api/index', 'c-api.tex', 'The", "are pickled, so don't put values in the namespace #", "Foundation}\\\\ Email: \\email{<EMAIL>} } \\let\\Verbatim=\\OriginalVerbatim \\let\\endVerbatim=\\endOriginalVerbatim ''' # Documents to", "([^_][\\w_]+)\\(.*\\)[\\s|\\\\]'), } # The coverage checker will ignore all C", "('distributing/index', 'distributing.tex', 'Distributing Python Modules', _stdauthor, 'manual'), ('extending/index', 'extending.tex', 'Extending", "# Custom sidebar templates, filenames relative to this file. html_sidebars", "\\let\\endVerbatim=\\endOriginalVerbatim ''' # Documents to append as an appendix to", "'What\\'s New in Python', '<NAME>', 'howto'), ] # Collect all", "= { 'index': 'indexsidebar.html', } # Additional templates that should", "'') # Split the index html_split_index = True # Options", "% time.strftime('%Y') # We look for the Include/patchlevel.h file in", "to append as an appendix to all manuals. latex_appendices =", "replace the values accordingly. import patchlevel version, release = patchlevel.get_version_info()", "= r'<NAME>\\\\and the Python development team' latex_documents = [ ('c-api/index',", "file. refcount_file = 'data/refcounts.dat' # Translation # ----------- gettext_compact =", "Output an OpenSearch description file. html_use_opensearch = 'https://docs.python.org/' + version", "they're removed automatically). 
import sys, os, time sys.path.append(os.path.abspath('tools/extensions')) # General", "a 'Last updated on:' timestamp is inserted at every page", "Language Reference', _stdauthor, 'manual'), ('tutorial/index', 'tutorial.tex', 'Python Tutorial', _stdauthor, 'manual'),", "# # Python documentation build configuration file # # This", "# Python documentation build configuration file # # This file", "that aren't pickleable (module imports are okay, they're removed automatically).", "fn[:-4], 'howto-' + fn[:-4] + '.tex', '', _stdauthor, 'howto') for", "manuals. latex_appendices = ['glossary', 'about', 'license', 'copyright'] # Get LaTeX", "or 'a4'). latex_paper_size = 'a4' # The font size ('10pt',", "the following regexes (using re.match). coverage_ignore_modules = [ r'[T|t][k|K]', r'Tix',", "# Glob patterns for C source files for C API", "'%s Documentation' % release # If not '', a 'Last", "['tools/templates'] # Custom sidebar templates, filenames relative to this file.", "okay, they're removed automatically). import sys, os, time sys.path.append(os.path.abspath('tools/extensions')) #", "= {'inputenc': r'\\usepackage[utf8x]{inputenc}', 'utf8extra': ''} # Options for Epub output", "paper size ('letter' or 'a4'). latex_paper_size = 'a4' # The", "items in the source files. coverage_c_regexes = { 'cfunction': (r'^PyAPI_FUNC\\(.*\\)\\s+([^_][\\w_]+)'),", "filenames relative to this file. html_sidebars = { 'index': 'indexsidebar.html',", "in os.listdir('howto') if fn.endswith('.rst') and fn != 'index.rst') # Additional", "values in the namespace # that aren't pickleable (module imports", "_stdauthor, 'manual'), ('tutorial/index', 'tutorial.tex', 'Python Tutorial', _stdauthor, 'manual'), ('using/index', 'using.tex',", "directory. coverage_c_path = [ '../Include/*.h', ] # Regexes to find", "automatically). import sys, os, time sys.path.append(os.path.abspath('tools/extensions')) # General configuration #", "= { 'cfunction': (r'^PyAPI_FUNC\\(.*\\)\\s+([^_][\\w_]+)'), 'data': (r'^PyAPI_DATA\\(.*\\)\\s+([^_][\\w_]+)'), 'macro': (r'^#define ([^_][\\w_]+)\\(.*\\)[\\s|\\\\]'), }", "API', _stdauthor, 'manual'), ('distributing/index', 'distributing.tex', 'Distributing Python Modules', _stdauthor, 'manual'),", "all manuals. latex_appendices = ['glossary', 'about', 'license', 'copyright'] # Get", "'data': (r'^PyAPI_DATA\\(.*\\)\\s+([^_][\\w_]+)'), 'macro': (r'^#define ([^_][\\w_]+)\\(.*\\)[\\s|\\\\]'), } # The coverage checker", "# 'cfunction': [...] } # Options for the link checker", "latex_elements = {'inputenc': r'\\usepackage[utf8x]{inputenc}', 'utf8extra': ''} # Options for Epub", "'howto'), ] # Collect all HOWTOs individually latex_documents.extend(('howto/' + fn[:-4],", "sys.path.append(os.path.abspath('tools/extensions')) # General configuration # --------------------- extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest',", "'distributing.tex', 'Distributing Python Modules', _stdauthor, 'manual'), ('extending/index', 'extending.tex', 'Extending and", "] coverage_ignore_classes = [ ] # Glob patterns for C", "Options for the link checker # ---------------------------- # Ignore certain", "{ # 'cfunction': [...] } # Options for the link", "build. needs_sphinx = '1.2' # Ignore any .rst files in", "is used as the format for a strftime call. today_fmt", "as the format for a strftime call. today_fmt = '%B", "names match these regexes # (using re.match) -- the keys", "of the reference count data file. refcount_file = 'data/refcounts.dat' #", "static files. 
html_static_path = ['tools/static'] # Output file base name", "'The Python Library Reference', _stdauthor, 'manual'), ('reference/index', 'reference.tex', 'The Python", "# The coverage checker will ignore all modules/functions/classes whose names", "build configuration file # # This file is execfile()d with", "the namespace # that aren't pickleable (module imports are okay,", "Reference', _stdauthor, 'manual'), ('reference/index', 'reference.tex', 'The Python Language Reference', _stdauthor,", "PEPs for now, they all have permanent redirects. r'http://www.python.org/dev/peps/pep-\\d+'] #", "# using the given strftime format. html_last_updated_fmt = '%b %d,", "append as an appendix to all manuals. latex_appendices = ['glossary',", "highlight_language = 'python3' # Require Sphinx 1.2 for build. needs_sphinx", "today_fmt = '%B %d, %Y' # By default, highlight as", "# Options for LaTeX output # ------------------------ # The paper", "+ release.replace('.', '') # Split the index html_split_index = True", "size ('letter' or 'a4'). latex_paper_size = 'a4' # The font", "'Python Tutorial', _stdauthor, 'manual'), ('using/index', 'using.tex', 'Python Setup and Usage',", "tuples # (source start file, target name, title, author, document", "time.strftime('%Y') # We look for the Include/patchlevel.h file in the", "----------------------- # Use our custom theme. html_theme = 'pydoctheme' html_theme_path", "{ 'index': 'indexsidebar.html', } # Additional templates that should be", "two options for replacing |today|: either, you set today to", "set to its containing dir. # # The contents of", "target name, title, author, document class [howto/manual]). _stdauthor = r'<NAME>\\\\and", "'', a 'Last updated on:' timestamp is inserted at every", "venv/ directory. exclude_patterns = ['venv/*'] # Options for HTML output", "# Split the index html_split_index = True # Options for", "given strftime format. html_last_updated_fmt = '%b %d, %Y' # Path", "'<NAME>', 'howto'), ] # Collect all HOWTOs individually latex_documents.extend(('howto/' +", "file are pickled, so don't put values in the namespace", "for Epub output # ----------------------- epub_author = 'Python Documentation Authors'", "] # Glob patterns for C source files for C", "If not '', a 'Last updated on:' timestamp is inserted", "= ['tools'] html_theme_options = {'collapsiblesidebar': True} # Short title used", "] # Regexes to find C items in the source", "must be the same as in coverage_c_regexes. coverage_ignore_c_items = {", "Usage', _stdauthor, 'manual'), ('faq/index', 'faq.tex', 'Python Frequently Asked Questions', _stdauthor,", "timestamp is inserted at every page bottom, # using the", "removed automatically). import sys, os, time sys.path.append(os.path.abspath('tools/extensions')) # General configuration", "rendered to pages. html_additional_pages = { 'download': 'download.html', 'index': 'indexcontent.html',", "Documentation Authors' epub_publisher = 'Python Software Foundation' # Options for", "_stdauthor, 'manual'), ('installing/index', 'installing.tex', 'Installing Python Modules', _stdauthor, 'manual'), ('library/index',", "patchlevel version, release = patchlevel.get_version_info() # There are two options", "the same as in coverage_c_regexes. coverage_ignore_c_items = { # 'cfunction':", "# ---------------------------- # Ignore certain URLs. 
linkcheck_ignore = [r'https://bugs.python.org/(issue)?\\d+', #", "'download.html', 'index': 'indexcontent.html', } # Output an OpenSearch description file.", "latex_appendices = ['glossary', 'about', 'license', 'copyright'] # Get LaTeX to", "os.listdir('howto') if fn.endswith('.rst') and fn != 'index.rst') # Additional stuff", "There are two options for replacing |today|: either, you set", "output # ------------------------ # The paper size ('letter' or 'a4').", "keys must be the same as in coverage_c_regexes. coverage_ignore_c_items =", "release # If not '', a 'Last updated on:' timestamp", "html_sidebars = { 'index': 'indexsidebar.html', } # Additional templates that", "= 'pydoctheme' html_theme_path = ['tools'] html_theme_options = {'collapsiblesidebar': True} #", "Additional stuff for the LaTeX preamble. latex_preamble = r''' \\authoraddress{", "following regexes (using re.match). coverage_ignore_modules = [ r'[T|t][k|K]', r'Tix', r'distutils.*',", "for extensions # ---------------------- # Relative filename of the reference", "'utf8extra': ''} # Options for Epub output # ----------------------- epub_author", "in the current Python source tree # and replace the", "used as the format for a strftime call. today_fmt =", "r''' \\authoraddress{ \\strong{Python Software Foundation}\\\\ Email: \\email{<EMAIL>} } \\let\\Verbatim=\\OriginalVerbatim \\let\\endVerbatim=\\endOriginalVerbatim", "(using re.match) -- the keys must be the same as", "Additional templates that should be rendered to pages. html_additional_pages =", "relative to this directory. coverage_c_path = [ '../Include/*.h', ] #", "[ r'[T|t][k|K]', r'Tix', r'distutils.*', ] coverage_ignore_functions = [ 'test($|_)', ]", "C source files for C API coverage, relative to this", "_stdauthor, 'manual'), ('extending/index', 'extending.tex', 'Extending and Embedding Python', _stdauthor, 'manual'),", "you set today to some # non-false value, then it", "it is used: today = '' # Else, today_fmt is", "to its containing dir. # # The contents of this", "Else, today_fmt is used as the format for a strftime", "html_theme = 'pydoctheme' html_theme_path = ['tools'] html_theme_options = {'collapsiblesidebar': True}", "inserted at every page bottom, # using the given strftime", "the values accordingly. import patchlevel version, release = patchlevel.get_version_info() #", "Python Library Reference', _stdauthor, 'manual'), ('reference/index', 'reference.tex', 'The Python Language", "individually latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex', '',", "('faq/index', 'faq.tex', 'Python Frequently Asked Questions', _stdauthor, 'manual'), ('whatsnew/' +", "# If not '', a 'Last updated on:' timestamp is", "refcount_file = 'data/refcounts.dat' # Translation # ----------- gettext_compact = False", "for fn in os.listdir('howto') if fn.endswith('.rst') and fn != 'index.rst')", "This file is execfile()d with the current directory set to", "count data file. refcount_file = 'data/refcounts.dat' # Translation # -----------", "an appendix to all manuals. latex_appendices = ['glossary', 'about', 'license',", "htmlhelp_basename = 'python' + release.replace('.', '') # Split the index", "# Use our custom theme. html_theme = 'pydoctheme' html_theme_path =", "coverage_c_regexes. coverage_ignore_c_items = { # 'cfunction': [...] 
} # Options", "coverage checker will ignore all modules/functions/classes whose names # match", "today = '' # Else, today_fmt is used as the", "Python development team' latex_documents = [ ('c-api/index', 'c-api.tex', 'The Python/C", "coverage_ignore_c_items = { # 'cfunction': [...] } # Options for", "all modules/functions/classes whose names # match any of the following", "= [ '../Include/*.h', ] # Regexes to find C items", "# Get LaTeX to handle Unicode correctly latex_elements = {'inputenc':", "Require Sphinx 1.2 for build. needs_sphinx = '1.2' # Ignore", "'a4'). latex_paper_size = 'a4' # The font size ('10pt', '11pt'", "class [howto/manual]). _stdauthor = r'<NAME>\\\\and the Python development team' latex_documents", "'Python' copyright = '2001-%s, Python Software Foundation' % time.strftime('%Y') #", "'manual'), ('distributing/index', 'distributing.tex', 'Distributing Python Modules', _stdauthor, 'manual'), ('extending/index', 'extending.tex',", "the coverage checker # -------------------------------- # The coverage checker will", "Options for extensions # ---------------------- # Relative filename of the", "General configuration # --------------------- extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific', 'c_annotations']", "templates, filenames relative to this file. html_sidebars = { 'index':", "= ['glossary', 'about', 'license', 'copyright'] # Get LaTeX to handle", "# match any of the following regexes (using re.match). coverage_ignore_modules", "version, release = patchlevel.get_version_info() # There are two options for", "# ----------------------- # Use our custom theme. html_theme = 'pydoctheme'", "as an appendix to all manuals. latex_appendices = ['glossary', 'about',", "Foundation' % time.strftime('%Y') # We look for the Include/patchlevel.h file", "fn.endswith('.rst') and fn != 'index.rst') # Additional stuff for the", "r'[T|t][k|K]', r'Tix', r'distutils.*', ] coverage_ignore_functions = [ 'test($|_)', ] coverage_ignore_classes", "('tutorial/index', 'tutorial.tex', 'Python Tutorial', _stdauthor, 'manual'), ('using/index', 'using.tex', 'Python Setup", "The coverage checker will ignore all modules/functions/classes whose names #", "to find HTML templates. templates_path = ['tools/templates'] # Custom sidebar", "Get LaTeX to handle Unicode correctly latex_elements = {'inputenc': r'\\usepackage[utf8x]{inputenc}',", "don't put values in the namespace # that aren't pickleable", "_stdauthor, 'manual'), ('distributing/index', 'distributing.tex', 'Distributing Python Modules', _stdauthor, 'manual'), ('extending/index',", "(r'^PyAPI_DATA\\(.*\\)\\s+([^_][\\w_]+)'), 'macro': (r'^#define ([^_][\\w_]+)\\(.*\\)[\\s|\\\\]'), } # The coverage checker will", "aren't pickleable (module imports are okay, they're removed automatically). import", "on:' timestamp is inserted at every page bottom, # using", "ignore all modules/functions/classes whose names # match any of the", "modules/functions/classes whose names # match any of the following regexes", "'Python Frequently Asked Questions', _stdauthor, 'manual'), ('whatsnew/' + version, 'whatsnew.tex',", "# The paper size ('letter' or 'a4'). latex_paper_size = 'a4'", "<title> HTML tags. html_short_title = '%s Documentation' % release #", "time sys.path.append(os.path.abspath('tools/extensions')) # General configuration # --------------------- extensions = ['sphinx.ext.coverage',", "%d, %Y' # By default, highlight as Python 3. highlight_language", "= 'python3' # Require Sphinx 1.2 for build. needs_sphinx =", "preamble. 
latex_preamble = r''' \\authoraddress{ \\strong{Python Software Foundation}\\\\ Email: \\email{<EMAIL>}", "import patchlevel version, release = patchlevel.get_version_info() # There are two", "% release # If not '', a 'Last updated on:'", "for now, they all have permanent redirects. r'http://www.python.org/dev/peps/pep-\\d+'] # Options", "needs_sphinx = '1.2' # Ignore any .rst files in the", "------------------------ # The paper size ('letter' or 'a4'). latex_paper_size =", "either, you set today to some # non-false value, then", "to handle Unicode correctly latex_elements = {'inputenc': r'\\usepackage[utf8x]{inputenc}', 'utf8extra': ''}", "coverage checker # -------------------------------- # The coverage checker will ignore", "---------------------- # Relative filename of the reference count data file.", "dir. # # The contents of this file are pickled,", "%Y' # By default, highlight as Python 3. highlight_language =", "# non-false value, then it is used: today = ''", "= [ ('c-api/index', 'c-api.tex', 'The Python/C API', _stdauthor, 'manual'), ('distributing/index',", "----------------------- epub_author = 'Python Documentation Authors' epub_publisher = 'Python Software", "checker will ignore all modules/functions/classes whose names # match any", "# # The contents of this file are pickled, so", "contents of this file are pickled, so don't put values", "= 'python' + release.replace('.', '') # Split the index html_split_index", "'', _stdauthor, 'howto') for fn in os.listdir('howto') if fn.endswith('.rst') and", "Epub output # ----------------------- epub_author = 'Python Documentation Authors' epub_publisher", "'download': 'download.html', 'index': 'indexcontent.html', } # Output an OpenSearch description", "coverage_c_path = [ '../Include/*.h', ] # Regexes to find C", "'Python Documentation Authors' epub_publisher = 'Python Software Foundation' # Options", "'%b %d, %Y' # Path to find HTML templates. templates_path", "a strftime call. today_fmt = '%B %d, %Y' # By", "for LaTeX output # ------------------------ # The paper size ('letter'", "Modules', _stdauthor, 'manual'), ('library/index', 'library.tex', 'The Python Library Reference', _stdauthor,", "('reference/index', 'reference.tex', 'The Python Language Reference', _stdauthor, 'manual'), ('tutorial/index', 'tutorial.tex',", "= '%s Documentation' % release # If not '', a", "title, author, document class [howto/manual]). _stdauthor = r'<NAME>\\\\and the Python", "# Else, today_fmt is used as the format for a", "# Translation # ----------- gettext_compact = False locale_dirs = [\"locale\"]", "= ['tools/static'] # Output file base name for HTML help", "['venv/*'] # Options for HTML output # ----------------------- # Use", "to find C items in the source files. coverage_c_regexes =", "pages. html_additional_pages = { 'download': 'download.html', 'index': 'indexcontent.html', } #", "r'<NAME>\\\\and the Python development team' latex_documents = [ ('c-api/index', 'c-api.tex',", "today to some # non-false value, then it is used:", "'10pt' # Grouping the document tree into LaTeX files. List", "file, target name, title, author, document class [howto/manual]). _stdauthor =", "regexes # (using re.match) -- the keys must be the", "relative to this file. html_sidebars = { 'index': 'indexsidebar.html', }", "'11pt' or '12pt'). latex_font_size = '10pt' # Grouping the document", "Ignore certain URLs. linkcheck_ignore = [r'https://bugs.python.org/(issue)?\\d+', # Ignore PEPs for", "# The font size ('10pt', '11pt' or '12pt'). 
latex_font_size =", "'license', 'copyright'] # Get LaTeX to handle Unicode correctly latex_elements", "] coverage_ignore_functions = [ 'test($|_)', ] coverage_ignore_classes = [ ]", "files. List of tuples # (source start file, target name,", "'index.rst') # Additional stuff for the LaTeX preamble. latex_preamble =", "filename of the reference count data file. refcount_file = 'data/refcounts.dat'", "some # non-false value, then it is used: today =", "bottom, # using the given strftime format. html_last_updated_fmt = '%b", "version # Additional static files. html_static_path = ['tools/static'] # Output", "# Documents to append as an appendix to all manuals.", "to this directory. coverage_c_path = [ '../Include/*.h', ] # Regexes", "for the LaTeX preamble. latex_preamble = r''' \\authoraddress{ \\strong{Python Software", "extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific', 'c_annotations'] # General substitutions. project", "strftime call. today_fmt = '%B %d, %Y' # By default,", "# General configuration # --------------------- extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific',", "# Output file base name for HTML help builder. htmlhelp_basename", "['glossary', 'about', 'license', 'copyright'] # Get LaTeX to handle Unicode" ]
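
# --- Illustrative sketch (not part of the original conf.py) ---
# A minimal, hypothetical check of the coverage_c_regexes patterns above
# against made-up declaration lines. The sample strings below are assumptions
# for illustration, not lines taken from the real CPython headers.
import re

_samples = {
    'cfunction': 'PyAPI_FUNC(PyObject *) PyList_New(Py_ssize_t size)',
    'data': 'PyAPI_DATA(PyObject *) PyExc_ValueError;',
    'macro': '#define Py_INCREF(op) ( (op)->ob_refcnt++ )',
}
for _kind, _line in _samples.items():
    _match = re.match(coverage_c_regexes[_kind], _line)
    # Each pattern captures the public C name in group 1.
    print(_kind, '->', _match.group(1) if _match else 'no match')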
[ "0: print('skewness = 0, therefore sample is normally distributed') else:", "+ str(mu)) return mu def sd(my_list): j = 0 sigma", "np def mean(my_list): # This is the defintion in the", "- mu)**3 + sumsk m +=1 skew = sumsk /(len(my_list)*sigma**3)", "return False def is_skew(my_list): m = 0 skew = 0", "is_skew(my_list): m = 0 skew = 0 sumsk = 0", "= norm(my_list) if 0.66 < dist < 0.70: print('Data is", "= 0 my_sum = 0 for number in my_list: my_sum", "distributed') def graph(my_list): plt.hist(my_list,density=True, facecolor='b') sigma = sd(my_list) #stores standard", "4*sigma, mu + 4*sigma) plt.grid(True) plt.show() def stats(my_list): mu =", "sample is not normally distributed') def graph(my_list): plt.hist(my_list,density=True, facecolor='b') sigma", "l = 0 mu = mean(my_list) sigma = sd(my_list) for", "mean(my_list) #stores mean plt.title('my_list Histogram') plt.xlabel('Number') plt.ylabel('Probability') plt.xlim(mu - 4*sigma,", "= 0 my_sumsd = 0 mu = mean(my_list) for number", "m = 0 skew = 0 sumsk = 0 mu", "mean plt.title('my_list Histogram') plt.xlabel('Number') plt.ylabel('Probability') plt.xlim(mu - 4*sigma, mu +", "= 0 sumsk = 0 mu = mean(my_list) sigma =", "not normally distributed') def graph(my_list): plt.hist(my_list,density=True, facecolor='b') sigma = sd(my_list)", "in my_list: my_sum = my_sum + my_list[i] i+=1 mu =", "sigma: k += 1 l += 1 else: l +=", "os import * # Import Numpy import numpy as np", "dist = k / l return dist def is_norm(my_list): dist", "print('Data is not normally distributed') return False def is_skew(my_list): m", "is normally distributed') else: print('skewness =/= 0, therefore sample is", "sd(my_list) for number in my_list: if abs(my_list[l] - mu) <", "i = 0 my_sum = 0 for number in my_list:", "= 0 mu = mean(my_list) for number in my_list: my_sumsd", "mu = mean(my_list) sigma = sd(my_list) for numbers in my_list:", "' + str(skew)) if skew == 0: print('skewness = 0,", "my_list[i] i+=1 mu = my_sum / i print('mean = '", "mu = mean(my_list) sigma = sd(my_list) for number in my_list:", "k / l return dist def is_norm(my_list): dist = norm(my_list)", "return sigma def norm(my_list): k = 0 l = 0", "from os import * # Import Numpy import numpy as", "Histogram') plt.xlabel('Number') plt.ylabel('Probability') plt.xlim(mu - 4*sigma, mu + 4*sigma) plt.grid(True)", "mu = my_sum / i print('mean = ' + str(mu))", "my_sum = 0 for number in my_list: my_sum = my_sum", "< sigma: k += 1 l += 1 else: l", "plt.xlabel('Number') plt.ylabel('Probability') plt.xlim(mu - 4*sigma, mu + 4*sigma) plt.grid(True) plt.show()", "plt.ylabel('Probability') plt.xlim(mu - 4*sigma, mu + 4*sigma) plt.grid(True) plt.show() def", "the head. i = 0 my_sum = 0 for number", "sigma = (my_sumsd/j)**(.5) print('standard deviation = ' + str(sigma)) return", "sigma def norm(my_list): k = 0 l = 0 mu", "plt.title('my_list Histogram') plt.xlabel('Number') plt.ylabel('Probability') plt.xlim(mu - 4*sigma, mu + 4*sigma)", "(my_sumsd/j)**(.5) print('standard deviation = ' + str(sigma)) return sigma def", "dist < 0.70: print('Data is normally distributed') return True else:", "plotting library import matplotlib.pyplot as plt #import.... from os import", "as plt #import.... 
from os import * # Import Numpy", "sd(my_list) #stores standard deviation mu = mean(my_list) #stores mean plt.title('my_list", "therefore sample is not normally distributed') def graph(my_list): plt.hist(my_list,density=True, facecolor='b')", "str(mu)) return mu def sd(my_list): j = 0 sigma =", "dist def is_norm(my_list): dist = norm(my_list) if 0.66 < dist", "0, therefore sample is normally distributed') else: print('skewness =/= 0,", "skew = 0 sumsk = 0 mu = mean(my_list) sigma", "normally distributed') return True else: print('Data is not normally distributed')", "# Import plotting library import matplotlib.pyplot as plt #import.... from", "0 skew = 0 sumsk = 0 mu = mean(my_list)", "plt.hist(my_list,density=True, facecolor='b') sigma = sd(my_list) #stores standard deviation mu =", "import numpy as np def mean(my_list): # This is the", "numpy as np def mean(my_list): # This is the defintion", "= 0 sigma = 0 my_sumsd = 0 mu =", "std = sd(my_list) dist = norm(my_list) graph(my_list) is_norm(my_list) is_skew(my_list) return", "/(len(my_list)*sigma**3) print('skewness = ' + str(skew)) if skew == 0:", "0 my_sum = 0 for number in my_list: my_sum =", "return True else: print('Data is not normally distributed') return False", "should be used. # Import plotting library import matplotlib.pyplot as", "= mean(my_list) std = sd(my_list) dist = norm(my_list) graph(my_list) is_norm(my_list)", "k = 0 l = 0 mu = mean(my_list) sigma", "= (my_list[m] - mu)**3 + sumsk m +=1 skew =", "sd(my_list) dist = norm(my_list) graph(my_list) is_norm(my_list) is_skew(my_list) return (mu, std,", "= mean(my_list) for number in my_list: my_sumsd = my_sumsd +", "be used. # Import plotting library import matplotlib.pyplot as plt", "0.66 < dist < 0.70: print('Data is normally distributed') return", "def graph(my_list): plt.hist(my_list,density=True, facecolor='b') sigma = sd(my_list) #stores standard deviation", "number in my_list: my_sum = my_sum + my_list[i] i+=1 mu", "in my_list: my_sumsd = my_sumsd + (my_list[j] - mu)**2 j", "str(skew)) if skew == 0: print('skewness = 0, therefore sample", "+= 1 dist = k / l return dist def", "= sd(my_list) for number in my_list: if abs(my_list[l] - mu)", "norm(my_list) if 0.66 < dist < 0.70: print('Data is normally", "== 0: print('skewness = 0, therefore sample is normally distributed')", "import * # Import Numpy import numpy as np def", "- 4*sigma, mu + 4*sigma) plt.grid(True) plt.show() def stats(my_list): mu", "1 l += 1 else: l += 1 dist =", "sigma = sd(my_list) for numbers in my_list: sumsk = (my_list[m]", "distributed') else: print('skewness =/= 0, therefore sample is not normally", "< dist < 0.70: print('Data is normally distributed') return True", "= 0, therefore sample is normally distributed') else: print('skewness =/=", "my_list: if abs(my_list[l] - mu) < sigma: k += 1", "= k / l return dist def is_norm(my_list): dist =", "< 0.70: print('Data is normally distributed') return True else: print('Data", "plt.grid(True) plt.show() def stats(my_list): mu = mean(my_list) std = sd(my_list)", "number in my_list: if abs(my_list[l] - mu) < sigma: k", "mean(my_list) std = sd(my_list) dist = norm(my_list) graph(my_list) is_norm(my_list) is_skew(my_list)", "1 else: l += 1 dist = k / l", "This is the defintion in the head. i = 0", "mu + 4*sigma) plt.grid(True) plt.show() def stats(my_list): mu = mean(my_list)", "mean(my_list) sigma = sd(my_list) for number in my_list: if abs(my_list[l]", "l += 1 else: l += 1 dist = k", "my_sumsd = 0 mu = mean(my_list) for number in my_list:", "module here. 
No other modules should be used. # Import", "def is_norm(my_list): dist = norm(my_list) if 0.66 < dist <", "True else: print('Data is not normally distributed') return False def", "0 mu = mean(my_list) sigma = sd(my_list) for numbers in", "sd(my_list) for numbers in my_list: sumsk = (my_list[m] - mu)**3", "sumsk = 0 mu = mean(my_list) sigma = sd(my_list) for", "sumsk = (my_list[m] - mu)**3 + sumsk m +=1 skew", "= my_sumsd + (my_list[j] - mu)**2 j +=1 sigma =", "+ sumsk m +=1 skew = sumsk /(len(my_list)*sigma**3) print('skewness =", "0 sigma = 0 my_sumsd = 0 mu = mean(my_list)", "sample is normally distributed') else: print('skewness =/= 0, therefore sample", "is the defintion in the head. i = 0 my_sum", "= mean(my_list) sigma = sd(my_list) for number in my_list: if", "plt.xlim(mu - 4*sigma, mu + 4*sigma) plt.grid(True) plt.show() def stats(my_list):", "mean(my_list): # This is the defintion in the head. i", "my_sum / i print('mean = ' + str(mu)) return mu", "modules should be used. # Import plotting library import matplotlib.pyplot", "0 for number in my_list: my_sum = my_sum + my_list[i]", "print('mean = ' + str(mu)) return mu def sd(my_list): j", "= my_sum + my_list[i] i+=1 mu = my_sum / i", "#stores mean plt.title('my_list Histogram') plt.xlabel('Number') plt.ylabel('Probability') plt.xlim(mu - 4*sigma, mu", "is not normally distributed') return False def is_skew(my_list): m =", "my_sum = my_sum + my_list[i] i+=1 mu = my_sum /", "skew == 0: print('skewness = 0, therefore sample is normally", "deviation mu = mean(my_list) #stores mean plt.title('my_list Histogram') plt.xlabel('Number') plt.ylabel('Probability')", "+= 1 else: l += 1 dist = k /", "#import.... from os import * # Import Numpy import numpy", "= mean(my_list) #stores mean plt.title('my_list Histogram') plt.xlabel('Number') plt.ylabel('Probability') plt.xlim(mu -", "' + str(mu)) return mu def sd(my_list): j = 0", "is_norm(my_list): dist = norm(my_list) if 0.66 < dist < 0.70:", "sigma = 0 my_sumsd = 0 mu = mean(my_list) for", "graph(my_list): plt.hist(my_list,density=True, facecolor='b') sigma = sd(my_list) #stores standard deviation mu", "standard deviation mu = mean(my_list) #stores mean plt.title('my_list Histogram') plt.xlabel('Number')", "mu) < sigma: k += 1 l += 1 else:", "Numpy import numpy as np def mean(my_list): # This is", "else: print('skewness =/= 0, therefore sample is not normally distributed')", "defintion in the head. i = 0 my_sum = 0", "my_sumsd = my_sumsd + (my_list[j] - mu)**2 j +=1 sigma", "for number in my_list: if abs(my_list[l] - mu) < sigma:", "<filename>basic_stats.py/basic_stats.py # Import the matplotlib module here. No other modules", "+ 4*sigma) plt.grid(True) plt.show() def stats(my_list): mu = mean(my_list) std", "here. No other modules should be used. # Import plotting", "stats(my_list): mu = mean(my_list) std = sd(my_list) dist = norm(my_list)", "Import Numpy import numpy as np def mean(my_list): # This", "facecolor='b') sigma = sd(my_list) #stores standard deviation mu = mean(my_list)", "+ my_list[i] i+=1 mu = my_sum / i print('mean =", "for number in my_list: my_sumsd = my_sumsd + (my_list[j] -", "therefore sample is normally distributed') else: print('skewness =/= 0, therefore", "mu = mean(my_list) std = sd(my_list) dist = norm(my_list) graph(my_list)", "print('Data is normally distributed') return True else: print('Data is not", "def norm(my_list): k = 0 l = 0 mu =", "the matplotlib module here. 
No other modules should be used.", "not normally distributed') return False def is_skew(my_list): m = 0", "mu)**3 + sumsk m +=1 skew = sumsk /(len(my_list)*sigma**3) print('skewness", "def mean(my_list): # This is the defintion in the head.", "mu def sd(my_list): j = 0 sigma = 0 my_sumsd", "j = 0 sigma = 0 my_sumsd = 0 mu", "- mu) < sigma: k += 1 l += 1", "in my_list: if abs(my_list[l] - mu) < sigma: k +=", "# This is the defintion in the head. i =", "in the head. i = 0 my_sum = 0 for", "for number in my_list: my_sum = my_sum + my_list[i] i+=1", "l return dist def is_norm(my_list): dist = norm(my_list) if 0.66", "plt #import.... from os import * # Import Numpy import", "print('standard deviation = ' + str(sigma)) return sigma def norm(my_list):", "#stores standard deviation mu = mean(my_list) #stores mean plt.title('my_list Histogram')", "0 mu = mean(my_list) sigma = sd(my_list) for number in", "the defintion in the head. i = 0 my_sum =", "+=1 skew = sumsk /(len(my_list)*sigma**3) print('skewness = ' + str(skew))", "= sd(my_list) dist = norm(my_list) graph(my_list) is_norm(my_list) is_skew(my_list) return (mu,", "= 0 l = 0 mu = mean(my_list) sigma =", "for numbers in my_list: sumsk = (my_list[m] - mu)**3 +", "mean(my_list) sigma = sd(my_list) for numbers in my_list: sumsk =", "distributed') return True else: print('Data is not normally distributed') return", "normally distributed') def graph(my_list): plt.hist(my_list,density=True, facecolor='b') sigma = sd(my_list) #stores", "library import matplotlib.pyplot as plt #import.... from os import *", "- mu)**2 j +=1 sigma = (my_sumsd/j)**(.5) print('standard deviation =", "str(sigma)) return sigma def norm(my_list): k = 0 l =", "def sd(my_list): j = 0 sigma = 0 my_sumsd =", "distributed') return False def is_skew(my_list): m = 0 skew =", "0 l = 0 mu = mean(my_list) sigma = sd(my_list)", "1 dist = k / l return dist def is_norm(my_list):", "No other modules should be used. # Import plotting library", "=/= 0, therefore sample is not normally distributed') def graph(my_list):", "import matplotlib.pyplot as plt #import.... from os import * #", "j +=1 sigma = (my_sumsd/j)**(.5) print('standard deviation = ' +", "normally distributed') return False def is_skew(my_list): m = 0 skew", "as np def mean(my_list): # This is the defintion in", "0 mu = mean(my_list) for number in my_list: my_sumsd =", "= ' + str(mu)) return mu def sd(my_list): j =", "mean(my_list) for number in my_list: my_sumsd = my_sumsd + (my_list[j]", "else: print('Data is not normally distributed') return False def is_skew(my_list):", "= 0 for number in my_list: my_sum = my_sum +", "= 0 skew = 0 sumsk = 0 mu =", "if skew == 0: print('skewness = 0, therefore sample is", "sumsk /(len(my_list)*sigma**3) print('skewness = ' + str(skew)) if skew ==", "sumsk m +=1 skew = sumsk /(len(my_list)*sigma**3) print('skewness = '", "False def is_skew(my_list): m = 0 skew = 0 sumsk", "= 0 mu = mean(my_list) sigma = sd(my_list) for number", "mu = mean(my_list) #stores mean plt.title('my_list Histogram') plt.xlabel('Number') plt.ylabel('Probability') plt.xlim(mu", "number in my_list: my_sumsd = my_sumsd + (my_list[j] - mu)**2", "is not normally distributed') def graph(my_list): plt.hist(my_list,density=True, facecolor='b') sigma =", "* # Import Numpy import numpy as np def mean(my_list):", "mu)**2 j +=1 sigma = (my_sumsd/j)**(.5) print('standard deviation = '", "head. 
i = 0 my_sum = 0 for number in", "= ' + str(sigma)) return sigma def norm(my_list): k =", "plt.show() def stats(my_list): mu = mean(my_list) std = sd(my_list) dist", "is normally distributed') return True else: print('Data is not normally", "my_sum + my_list[i] i+=1 mu = my_sum / i print('mean", "dist = norm(my_list) graph(my_list) is_norm(my_list) is_skew(my_list) return (mu, std, dist)", "dist = norm(my_list) if 0.66 < dist < 0.70: print('Data", "(my_list[m] - mu)**3 + sumsk m +=1 skew = sumsk", "= mean(my_list) sigma = sd(my_list) for numbers in my_list: sumsk", "m +=1 skew = sumsk /(len(my_list)*sigma**3) print('skewness = ' +", "my_list: sumsk = (my_list[m] - mu)**3 + sumsk m +=1", "+ (my_list[j] - mu)**2 j +=1 sigma = (my_sumsd/j)**(.5) print('standard", "+ str(skew)) if skew == 0: print('skewness = 0, therefore", "my_list: my_sumsd = my_sumsd + (my_list[j] - mu)**2 j +=1", "return dist def is_norm(my_list): dist = norm(my_list) if 0.66 <", "abs(my_list[l] - mu) < sigma: k += 1 l +=", "= (my_sumsd/j)**(.5) print('standard deviation = ' + str(sigma)) return sigma", "used. # Import plotting library import matplotlib.pyplot as plt #import....", "other modules should be used. # Import plotting library import", "Import plotting library import matplotlib.pyplot as plt #import.... from os", "def is_skew(my_list): m = 0 skew = 0 sumsk =", "k += 1 l += 1 else: l += 1", "/ l return dist def is_norm(my_list): dist = norm(my_list) if", "def stats(my_list): mu = mean(my_list) std = sd(my_list) dist =", "+ str(sigma)) return sigma def norm(my_list): k = 0 l", "my_sumsd + (my_list[j] - mu)**2 j +=1 sigma = (my_sumsd/j)**(.5)", "my_list: my_sum = my_sum + my_list[i] i+=1 mu = my_sum", "deviation = ' + str(sigma)) return sigma def norm(my_list): k", "= ' + str(skew)) if skew == 0: print('skewness =", "4*sigma) plt.grid(True) plt.show() def stats(my_list): mu = mean(my_list) std =", "# Import the matplotlib module here. No other modules should", "+= 1 l += 1 else: l += 1 dist", "matplotlib module here. No other modules should be used. #", "0 sumsk = 0 mu = mean(my_list) sigma = sd(my_list)", "= my_sum / i print('mean = ' + str(mu)) return", "mu = mean(my_list) for number in my_list: my_sumsd = my_sumsd", "matplotlib.pyplot as plt #import.... 
from os import * # Import", "' + str(sigma)) return sigma def norm(my_list): k = 0", "= 0 mu = mean(my_list) sigma = sd(my_list) for numbers", "= sd(my_list) for numbers in my_list: sumsk = (my_list[m] -", "print('skewness = ' + str(skew)) if skew == 0: print('skewness", "print('skewness =/= 0, therefore sample is not normally distributed') def", "0 my_sumsd = 0 mu = mean(my_list) for number in", "if abs(my_list[l] - mu) < sigma: k += 1 l", "else: l += 1 dist = k / l return", "sigma = sd(my_list) #stores standard deviation mu = mean(my_list) #stores", "0.70: print('Data is normally distributed') return True else: print('Data is", "# Import Numpy import numpy as np def mean(my_list): #", "normally distributed') else: print('skewness =/= 0, therefore sample is not", "0, therefore sample is not normally distributed') def graph(my_list): plt.hist(my_list,density=True,", "+=1 sigma = (my_sumsd/j)**(.5) print('standard deviation = ' + str(sigma))", "return mu def sd(my_list): j = 0 sigma = 0", "i print('mean = ' + str(mu)) return mu def sd(my_list):", "sigma = sd(my_list) for number in my_list: if abs(my_list[l] -", "/ i print('mean = ' + str(mu)) return mu def", "in my_list: sumsk = (my_list[m] - mu)**3 + sumsk m", "(my_list[j] - mu)**2 j +=1 sigma = (my_sumsd/j)**(.5) print('standard deviation", "skew = sumsk /(len(my_list)*sigma**3) print('skewness = ' + str(skew)) if", "sd(my_list): j = 0 sigma = 0 my_sumsd = 0", "norm(my_list): k = 0 l = 0 mu = mean(my_list)", "numbers in my_list: sumsk = (my_list[m] - mu)**3 + sumsk", "if 0.66 < dist < 0.70: print('Data is normally distributed')", "l += 1 dist = k / l return dist", "print('skewness = 0, therefore sample is normally distributed') else: print('skewness", "i+=1 mu = my_sum / i print('mean = ' +", "Import the matplotlib module here. No other modules should be", "= sumsk /(len(my_list)*sigma**3) print('skewness = ' + str(skew)) if skew", "= sd(my_list) #stores standard deviation mu = mean(my_list) #stores mean" ]
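
# --- Illustrative sketch (not part of the original file) ---
# A minimal usage example for stats() above; the sample list is made up.
if __name__ == '__main__':
    sample = [2, 4, 4, 4, 5, 5, 7, 9]
    mu, std, dist = stats(sample)
    print('mean=%s, sd=%s, fraction within one sd=%s' % (mu, std, dist))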
[ "raise RuntimeError('Found multiple forthcoming sections') forthcoming_label = title_text return forthcoming_label", "(%s)' % (new_version, datetime.date.today().isoformat()) for (pkg_name, changelog_path, changelog, forthcoming_label) in", "new_changelog_data = [] new_label = '%s (%s)' % (new_version, datetime.date.today().isoformat())", "not packages: raise RuntimeError('No packages found') print('Found packages: %s' %", "(default: %(default)s)') args = parser.parse_args(sysargs) base_path = '.' # find", "\"\"\"This script renames the forthcoming section in changelog files with", "continue # check that new_version section does not exist yet", "def rename_section(data, old_label, new_label): valid_section_characters = '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~' def replace_section(match): section_char", "import docutils.core import os import re import sys from catkin_pkg.changelog", "in document.children: title = None if isinstance(child, docutils.nodes.subtitle): title =", "%(default)s)') args = parser.parse_args(sysargs) base_path = '.' # find packages", "'.' # find packages packages = find_packages(base_path) if not packages:", "bump_version from catkin_pkg.packages import find_packages, verify_equal_package_versions def get_forthcoming_label(rst): document =", "if FORTHCOMING_LABEL.lower() in title_text.lower(): if forthcoming_label: raise RuntimeError('Found multiple forthcoming", "default='patch', help='Which part of the version number to bump? (default:", "% (new_version, datetime.date.today().isoformat()) for (pkg_name, changelog_path, changelog, forthcoming_label) in changelogs:", "forthcoming section in changelog files with the upcoming version and", "from catkin_pkg.changelog_generator import FORTHCOMING_LABEL from catkin_pkg.package_version import bump_version from catkin_pkg.packages", "missing_forthcoming = [] already_tagged = [] for pkg_path, package in", "packages have same version number old_version = verify_equal_package_versions(packages.values()) new_version =", "'!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~' def replace_section(match): section_char = match.group(2)[0] return new_label + '\\n'", "forthcoming_label = get_forthcoming_label(changelog.rst) if not forthcoming_label: missing_forthcoming.append(package.name) continue # check", "new_label + '\\n' + section_char * len(new_label) pattern = '^('", "if not os.path.exists(changelog_path): missing_forthcoming.append(package.name) continue changelog = get_changelog_from_path(changelog_path, package.name) if", "pass changelogs.append((package.name, changelog_path, changelog, forthcoming_label)) if missing_forthcoming: print('The following packages", "= [] already_tagged = [] for pkg_path, package in packages.items():", "[] for pkg_path, package in packages.items(): changelog_path = os.path.join(base_path, pkg_path,", "'%s' to '%s' in package '%s'...\" % (forthcoming_label, new_label, pkg_name))", "FORTHCOMING_LABEL.lower() in title_text.lower(): if forthcoming_label: raise RuntimeError('Found multiple forthcoming sections')", "changelog file: %s' % ', '.join(sorted(missing_forthcoming)), file=sys.stderr) if already_tagged: print(\"The", "> 0 and isinstance(section.children[0], docutils.nodes.title): title = section.children[0] if title", "> 1: raise RuntimeError('Found multiple matching sections') return data def", "current date\"\"\" from __future__ import print_function import argparse import datetime", "find_packages, verify_equal_package_versions def get_forthcoming_label(rst): document = 
docutils.core.publish_doctree(rst) forthcoming_label = None", "document.children: title = None if isinstance(child, docutils.nodes.subtitle): title = child", "for (pkg_name, changelog_path, changelog, forthcoming_label) in changelogs: print(\"Renaming section '%s'", "with the upcoming version and the current date\"\"\" from __future__", "datetime.date.today().isoformat()) for (pkg_name, changelog_path, changelog, forthcoming_label) in changelogs: print(\"Renaming section", "packages packages = find_packages(base_path) if not packages: raise RuntimeError('No packages", "% ', '.join([p.name for p in packages.values()])) # fetch current", "changelogs = [] missing_forthcoming = [] already_tagged = [] for", "packages = find_packages(base_path) if not packages: raise RuntimeError('No packages found')", "forthcoming_label)) if missing_forthcoming: print('The following packages do not have a", "version number to bump? (default: %(default)s)') args = parser.parse_args(sysargs) base_path", "file=sys.stderr) # rename forthcoming sections to new_version including current date", "the current date\"\"\" from __future__ import print_function import argparse import", "import re import sys from catkin_pkg.changelog import CHANGELOG_FILENAME, get_changelog_from_path from", "% (forthcoming_label, new_label, pkg_name)) data = rename_section(changelog.rst, forthcoming_label, new_label) new_changelog_data.append((changelog_path,", "version number') parser.add_argument('--bump', choices=('major', 'minor', 'patch'), default='patch', help='Which part of", "= '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~' def replace_section(match): section_char = match.group(2)[0] return new_label +", "find packages packages = find_packages(base_path) if not packages: raise RuntimeError('No", "already_tagged = [] for pkg_path, package in packages.items(): changelog_path =", "old_label, new_label): valid_section_characters = '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~' def replace_section(match): section_char = match.group(2)[0]", "import datetime import docutils.core import os import re import sys", "not exist yet try: changelog.get_content_of_version(new_version) already_tagged.append(package.name) continue except KeyError: pass", "(changelog_path, data) in new_changelog_data: with open(changelog_path, 'wb') as f: f.write(data.encode('utf-8'))", "print('Writing updated changelog files...') for (changelog_path, data) in new_changelog_data: with", "find section') if count > 1: raise RuntimeError('Found multiple matching", "get_changelog_from_path from catkin_pkg.changelog_generator import FORTHCOMING_LABEL from catkin_pkg.package_version import bump_version from", "their changelog file: %s' % ', '.join(sorted(missing_forthcoming)), file=sys.stderr) if already_tagged:", "None for child in document.children: title = None if isinstance(child,", "in packages.items(): changelog_path = os.path.join(base_path, pkg_path, CHANGELOG_FILENAME) if not os.path.exists(changelog_path):", "if count > 1: raise RuntimeError('Found multiple matching sections') return", "parser.add_argument('--bump', choices=('major', 'minor', 'patch'), default='patch', help='Which part of the version", "def main(sysargs=None): parser = argparse.ArgumentParser(description='Tag the forthcoming section in the", "section in the changelog files with an upcoming version number')", "script renames the forthcoming section in changelog files with the", "os.path.exists(changelog_path): missing_forthcoming.append(package.name) continue changelog = get_changelog_from_path(changelog_path, 
package.name) if not changelog:", "verify_equal_package_versions(packages.values()) new_version = bump_version(old_version, args.bump) print('Tag version %s' % new_version)", "= section.children[0] if title and len(title.children) > 0 and isinstance(title.children[0],", "+ re.escape(old_label) + ')\\n([' + re.escape(valid_section_characters) + ']+)$' data, count", "+ ')\\n([' + re.escape(valid_section_characters) + ']+)$' data, count = re.subn(pattern,", "catkin_pkg.changelog_generator import FORTHCOMING_LABEL from catkin_pkg.package_version import bump_version from catkin_pkg.packages import", "section.children[0] if title and len(title.children) > 0 and isinstance(title.children[0], docutils.nodes.Text):", "changelog, forthcoming_label) in changelogs: print(\"Renaming section '%s' to '%s' in", "flags=re.MULTILINE) if count == 0: raise RuntimeError('Could not find section')", "rename_section(data, old_label, new_label): valid_section_characters = '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~' def replace_section(match): section_char =", "section in changelog files with the upcoming version and the", "in their changelog file: %s\" % (new_version, ', '.join(sorted(already_tagged))), file=sys.stderr)", "catkin_pkg.package_version import bump_version from catkin_pkg.packages import find_packages, verify_equal_package_versions def get_forthcoming_label(rst):", "child in document.children: title = None if isinstance(child, docutils.nodes.subtitle): title", "have a forthcoming section in their changelog file: %s' %", "main(sysargs=None): parser = argparse.ArgumentParser(description='Tag the forthcoming section in the changelog", "= title.children[0].rawsource if FORTHCOMING_LABEL.lower() in title_text.lower(): if forthcoming_label: raise RuntimeError('Found", "file: %s' % ', '.join(sorted(missing_forthcoming)), file=sys.stderr) if already_tagged: print(\"The following", "import argparse import datetime import docutils.core import os import re", "choices=('major', 'minor', 'patch'), default='patch', help='Which part of the version number", "missing_forthcoming: print('The following packages do not have a forthcoming section", "packages found') print('Found packages: %s' % ', '.join([p.name for p", "= title_text return forthcoming_label def rename_section(data, old_label, new_label): valid_section_characters =", "forthcoming_label = title_text return forthcoming_label def rename_section(data, old_label, new_label): valid_section_characters", "elif isinstance(child, docutils.nodes.section): section = child if len(section.children) > 0", "0: raise RuntimeError('Could not find section') if count > 1:", "forthcoming sections') forthcoming_label = title_text return forthcoming_label def rename_section(data, old_label,", "data def main(sysargs=None): parser = argparse.ArgumentParser(description='Tag the forthcoming section in", "if already_tagged: print(\"The following packages do already have a section", "from catkin_pkg.package_version import bump_version from catkin_pkg.packages import find_packages, verify_equal_package_versions def", "= docutils.core.publish_doctree(rst) forthcoming_label = None for child in document.children: title", "not forthcoming_label: missing_forthcoming.append(package.name) continue # check that new_version section does", "bump? (default: %(default)s)') args = parser.parse_args(sysargs) base_path = '.' 
#", "child if len(section.children) > 0 and isinstance(section.children[0], docutils.nodes.title): title =", "data = rename_section(changelog.rst, forthcoming_label, new_label) new_changelog_data.append((changelog_path, data)) print('Writing updated changelog", "and isinstance(title.children[0], docutils.nodes.Text): title_text = title.children[0].rawsource if FORTHCOMING_LABEL.lower() in title_text.lower():", "(new_version, datetime.date.today().isoformat()) for (pkg_name, changelog_path, changelog, forthcoming_label) in changelogs: print(\"Renaming", "updated changelog files...') for (changelog_path, data) in new_changelog_data: with open(changelog_path,", "# find packages packages = find_packages(base_path) if not packages: raise", "that new_version section does not exist yet try: changelog.get_content_of_version(new_version) already_tagged.append(package.name)", "forthcoming section in the changelog files with an upcoming version", "import bump_version from catkin_pkg.packages import find_packages, verify_equal_package_versions def get_forthcoming_label(rst): document", "for child in document.children: title = None if isinstance(child, docutils.nodes.subtitle):", "'.join([p.name for p in packages.values()])) # fetch current version and", "CHANGELOG_FILENAME) if not os.path.exists(changelog_path): missing_forthcoming.append(package.name) continue changelog = get_changelog_from_path(changelog_path, package.name)", "', '.join(sorted(missing_forthcoming)), file=sys.stderr) if already_tagged: print(\"The following packages do already", "forthcoming_label = None for child in document.children: title = None", "if title and len(title.children) > 0 and isinstance(title.children[0], docutils.nodes.Text): title_text", "title_text.lower(): if forthcoming_label: raise RuntimeError('Found multiple forthcoming sections') forthcoming_label =", "if not packages: raise RuntimeError('No packages found') print('Found packages: %s'", "to new_version including current date new_changelog_data = [] new_label =", "re.subn(pattern, replace_section, data, flags=re.MULTILINE) if count == 0: raise RuntimeError('Could", "that forthcoming section exists forthcoming_label = get_forthcoming_label(changelog.rst) if not forthcoming_label:", "= re.subn(pattern, replace_section, data, flags=re.MULTILINE) if count == 0: raise", "0 and isinstance(section.children[0], docutils.nodes.title): title = section.children[0] if title and", "return forthcoming_label def rename_section(data, old_label, new_label): valid_section_characters = '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~' def", "does not exist yet try: changelog.get_content_of_version(new_version) already_tagged.append(package.name) continue except KeyError:", "for (changelog_path, data) in new_changelog_data: with open(changelog_path, 'wb') as f:", "forthcoming_label) in changelogs: print(\"Renaming section '%s' to '%s' in package", "% ', '.join(sorted(missing_forthcoming)), file=sys.stderr) if already_tagged: print(\"The following packages do", "= '^(' + re.escape(old_label) + ')\\n([' + re.escape(valid_section_characters) + ']+)$'", "yet try: changelog.get_content_of_version(new_version) already_tagged.append(package.name) continue except KeyError: pass changelogs.append((package.name, changelog_path,", "docutils.core import os import re import sys from catkin_pkg.changelog import", "find_packages(base_path) if not packages: raise RuntimeError('No packages found') print('Found packages:", "valid_section_characters = '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~' def 
replace_section(match): section_char = match.group(2)[0] return new_label", "= argparse.ArgumentParser(description='Tag the forthcoming section in the changelog files with", "'minor', 'patch'), default='patch', help='Which part of the version number to", "help='Which part of the version number to bump? (default: %(default)s)')", "new_label = '%s (%s)' % (new_version, datetime.date.today().isoformat()) for (pkg_name, changelog_path,", "changelogs: print(\"Renaming section '%s' to '%s' in package '%s'...\" %", "(pkg_name, changelog_path, changelog, forthcoming_label) in changelogs: print(\"Renaming section '%s' to", "not find section') if count > 1: raise RuntimeError('Found multiple", "len(new_label) pattern = '^(' + re.escape(old_label) + ')\\n([' + re.escape(valid_section_characters)", "following packages do not have a forthcoming section in their", "upcoming version number') parser.add_argument('--bump', choices=('major', 'minor', 'patch'), default='patch', help='Which part", "argparse.ArgumentParser(description='Tag the forthcoming section in the changelog files with an", "changelog = get_changelog_from_path(changelog_path, package.name) if not changelog: missing_forthcoming.append(package.name) continue #", "= match.group(2)[0] return new_label + '\\n' + section_char * len(new_label)", "changelog files with an upcoming version number') parser.add_argument('--bump', choices=('major', 'minor',", "a forthcoming section in their changelog file: %s' % ',", "# fetch current version and verify that all packages have", "None if isinstance(child, docutils.nodes.subtitle): title = child elif isinstance(child, docutils.nodes.section):", "len(title.children) > 0 and isinstance(title.children[0], docutils.nodes.Text): title_text = title.children[0].rawsource if", "docutils.nodes.Text): title_text = title.children[0].rawsource if FORTHCOMING_LABEL.lower() in title_text.lower(): if forthcoming_label:", "pkg_path, package in packages.items(): changelog_path = os.path.join(base_path, pkg_path, CHANGELOG_FILENAME) if", "package '%s'...\" % (forthcoming_label, new_label, pkg_name)) data = rename_section(changelog.rst, forthcoming_label,", "% new_version) # check for changelog entries changelogs = []", "the forthcoming section in changelog files with the upcoming version", "exists forthcoming_label = get_forthcoming_label(changelog.rst) if not forthcoming_label: missing_forthcoming.append(package.name) continue #", "continue # check that forthcoming section exists forthcoming_label = get_forthcoming_label(changelog.rst)", "len(section.children) > 0 and isinstance(section.children[0], docutils.nodes.title): title = section.children[0] if", "RuntimeError('Found multiple forthcoming sections') forthcoming_label = title_text return forthcoming_label def", "including current date new_changelog_data = [] new_label = '%s (%s)'", "missing_forthcoming.append(package.name) continue changelog = get_changelog_from_path(changelog_path, package.name) if not changelog: missing_forthcoming.append(package.name)", "matching sections') return data def main(sysargs=None): parser = argparse.ArgumentParser(description='Tag the", "print('The following packages do not have a forthcoming section in", "CHANGELOG_FILENAME, get_changelog_from_path from catkin_pkg.changelog_generator import FORTHCOMING_LABEL from catkin_pkg.package_version import bump_version", "have a section '%s' in their changelog file: %s\" %", "version and the current date\"\"\" from __future__ import print_function import", "changelog, forthcoming_label)) if 
missing_forthcoming: print('The following packages do not have", "if len(section.children) > 0 and isinstance(section.children[0], docutils.nodes.title): title = section.children[0]", "package.name) if not changelog: missing_forthcoming.append(package.name) continue # check that forthcoming", "continue except KeyError: pass changelogs.append((package.name, changelog_path, changelog, forthcoming_label)) if missing_forthcoming:", "in changelog files with the upcoming version and the current", "for changelog entries changelogs = [] missing_forthcoming = [] already_tagged", "multiple matching sections') return data def main(sysargs=None): parser = argparse.ArgumentParser(description='Tag", "= get_changelog_from_path(changelog_path, package.name) if not changelog: missing_forthcoming.append(package.name) continue # check", "= child elif isinstance(child, docutils.nodes.section): section = child if len(section.children)", "an upcoming version number') parser.add_argument('--bump', choices=('major', 'minor', 'patch'), default='patch', help='Which", "# rename forthcoming sections to new_version including current date new_changelog_data", "= find_packages(base_path) if not packages: raise RuntimeError('No packages found') print('Found", "number') parser.add_argument('--bump', choices=('major', 'minor', 'patch'), default='patch', help='Which part of the", "same version number old_version = verify_equal_package_versions(packages.values()) new_version = bump_version(old_version, args.bump)", "isinstance(title.children[0], docutils.nodes.Text): title_text = title.children[0].rawsource if FORTHCOMING_LABEL.lower() in title_text.lower(): if", "multiple forthcoming sections') forthcoming_label = title_text return forthcoming_label def rename_section(data,", "missing_forthcoming.append(package.name) continue # check that forthcoming section exists forthcoming_label =", "base_path = '.' 
# find packages packages = find_packages(base_path) if", "in package '%s'...\" % (forthcoming_label, new_label, pkg_name)) data = rename_section(changelog.rst,", "rename_section(changelog.rst, forthcoming_label, new_label) new_changelog_data.append((changelog_path, data)) print('Writing updated changelog files...') for", "FORTHCOMING_LABEL from catkin_pkg.package_version import bump_version from catkin_pkg.packages import find_packages, verify_equal_package_versions", "%s' % ', '.join(sorted(missing_forthcoming)), file=sys.stderr) if already_tagged: print(\"The following packages", "(forthcoming_label, new_label, pkg_name)) data = rename_section(changelog.rst, forthcoming_label, new_label) new_changelog_data.append((changelog_path, data))", "docutils.nodes.title): title = section.children[0] if title and len(title.children) > 0", "in changelogs: print(\"Renaming section '%s' to '%s' in package '%s'...\"", "def replace_section(match): section_char = match.group(2)[0] return new_label + '\\n' +", "raise RuntimeError('No packages found') print('Found packages: %s' % ', '.join([p.name", "sys from catkin_pkg.changelog import CHANGELOG_FILENAME, get_changelog_from_path from catkin_pkg.changelog_generator import FORTHCOMING_LABEL", "print('Found packages: %s' % ', '.join([p.name for p in packages.values()]))", "that all packages have same version number old_version = verify_equal_package_versions(packages.values())", "new_version) # check for changelog entries changelogs = [] missing_forthcoming", "section exists forthcoming_label = get_forthcoming_label(changelog.rst) if not forthcoming_label: missing_forthcoming.append(package.name) continue", "child elif isinstance(child, docutils.nodes.section): section = child if len(section.children) >", "']+)$' data, count = re.subn(pattern, replace_section, data, flags=re.MULTILINE) if count", "section') if count > 1: raise RuntimeError('Found multiple matching sections')", "forthcoming sections to new_version including current date new_changelog_data = []", "changelog files with the upcoming version and the current date\"\"\"", "forthcoming_label: raise RuntimeError('Found multiple forthcoming sections') forthcoming_label = title_text return", "> 0 and isinstance(title.children[0], docutils.nodes.Text): title_text = title.children[0].rawsource if FORTHCOMING_LABEL.lower()", "', '.join(sorted(already_tagged))), file=sys.stderr) # rename forthcoming sections to new_version including", "version number old_version = verify_equal_package_versions(packages.values()) new_version = bump_version(old_version, args.bump) print('Tag", "changelog: missing_forthcoming.append(package.name) continue # check that forthcoming section exists forthcoming_label", "RuntimeError('Found multiple matching sections') return data def main(sysargs=None): parser =", "RuntimeError('Could not find section') if count > 1: raise RuntimeError('Found", "entries changelogs = [] missing_forthcoming = [] already_tagged = []", "for pkg_path, package in packages.items(): changelog_path = os.path.join(base_path, pkg_path, CHANGELOG_FILENAME)", "import FORTHCOMING_LABEL from catkin_pkg.package_version import bump_version from catkin_pkg.packages import find_packages,", "section does not exist yet try: changelog.get_content_of_version(new_version) already_tagged.append(package.name) continue except", "number old_version = verify_equal_package_versions(packages.values()) new_version = bump_version(old_version, args.bump) print('Tag version", "isinstance(section.children[0], docutils.nodes.title): 
title = section.children[0] if title and len(title.children) >", "'%s' in their changelog file: %s\" % (new_version, ', '.join(sorted(already_tagged))),", "if count == 0: raise RuntimeError('Could not find section') if", "= get_forthcoming_label(changelog.rst) if not forthcoming_label: missing_forthcoming.append(package.name) continue # check that", "and the current date\"\"\" from __future__ import print_function import argparse", "a section '%s' in their changelog file: %s\" % (new_version,", "re.escape(old_label) + ')\\n([' + re.escape(valid_section_characters) + ']+)$' data, count =", "argparse import datetime import docutils.core import os import re import", "of the version number to bump? (default: %(default)s)') args =", "if not changelog: missing_forthcoming.append(package.name) continue # check that forthcoming section", "upcoming version and the current date\"\"\" from __future__ import print_function", "import sys from catkin_pkg.changelog import CHANGELOG_FILENAME, get_changelog_from_path from catkin_pkg.changelog_generator import", "if isinstance(child, docutils.nodes.subtitle): title = child elif isinstance(child, docutils.nodes.section): section", "the changelog files with an upcoming version number') parser.add_argument('--bump', choices=('major',", "match.group(2)[0] return new_label + '\\n' + section_char * len(new_label) pattern", "docutils.nodes.section): section = child if len(section.children) > 0 and isinstance(section.children[0],", "os import re import sys from catkin_pkg.changelog import CHANGELOG_FILENAME, get_changelog_from_path", "to bump? (default: %(default)s)') args = parser.parse_args(sysargs) base_path = '.'", "changelog file: %s\" % (new_version, ', '.join(sorted(already_tagged))), file=sys.stderr) # rename", "args.bump) print('Tag version %s' % new_version) # check for changelog", "'patch'), default='patch', help='Which part of the version number to bump?", "print(\"Renaming section '%s' to '%s' in package '%s'...\" % (forthcoming_label,", "parser = argparse.ArgumentParser(description='Tag the forthcoming section in the changelog files", "')\\n([' + re.escape(valid_section_characters) + ']+)$' data, count = re.subn(pattern, replace_section,", "parser.parse_args(sysargs) base_path = '.' # find packages packages = find_packages(base_path)", "= os.path.join(base_path, pkg_path, CHANGELOG_FILENAME) if not os.path.exists(changelog_path): missing_forthcoming.append(package.name) continue changelog", "RuntimeError('No packages found') print('Found packages: %s' % ', '.join([p.name for", "packages.items(): changelog_path = os.path.join(base_path, pkg_path, CHANGELOG_FILENAME) if not os.path.exists(changelog_path): missing_forthcoming.append(package.name)", "__future__ import print_function import argparse import datetime import docutils.core import", "files...') for (changelog_path, data) in new_changelog_data: with open(changelog_path, 'wb') as", "to '%s' in package '%s'...\" % (forthcoming_label, new_label, pkg_name)) data", "args = parser.parse_args(sysargs) base_path = '.' 
# find packages packages", "title_text return forthcoming_label def rename_section(data, old_label, new_label): valid_section_characters = '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~'", "count = re.subn(pattern, replace_section, data, flags=re.MULTILINE) if count == 0:", "replace_section, data, flags=re.MULTILINE) if count == 0: raise RuntimeError('Could not", "docutils.nodes.subtitle): title = child elif isinstance(child, docutils.nodes.section): section = child", "title = section.children[0] if title and len(title.children) > 0 and", "the version number to bump? (default: %(default)s)') args = parser.parse_args(sysargs)", "check for changelog entries changelogs = [] missing_forthcoming = []", "file=sys.stderr) if already_tagged: print(\"The following packages do already have a", "with an upcoming version number') parser.add_argument('--bump', choices=('major', 'minor', 'patch'), default='patch',", "already_tagged: print(\"The following packages do already have a section '%s'", "except KeyError: pass changelogs.append((package.name, changelog_path, changelog, forthcoming_label)) if missing_forthcoming: print('The", "new_version = bump_version(old_version, args.bump) print('Tag version %s' % new_version) #", "have same version number old_version = verify_equal_package_versions(packages.values()) new_version = bump_version(old_version,", "# check that new_version section does not exist yet try:", "= rename_section(changelog.rst, forthcoming_label, new_label) new_changelog_data.append((changelog_path, data)) print('Writing updated changelog files...')", "catkin_pkg.packages import find_packages, verify_equal_package_versions def get_forthcoming_label(rst): document = docutils.core.publish_doctree(rst) forthcoming_label", "changelog_path, changelog, forthcoming_label) in changelogs: print(\"Renaming section '%s' to '%s'", "import print_function import argparse import datetime import docutils.core import os", "= '%s (%s)' % (new_version, datetime.date.today().isoformat()) for (pkg_name, changelog_path, changelog,", "'%s'...\" % (forthcoming_label, new_label, pkg_name)) data = rename_section(changelog.rst, forthcoming_label, new_label)", "%s' % ', '.join([p.name for p in packages.values()])) # fetch", "date\"\"\" from __future__ import print_function import argparse import datetime import", "# check for changelog entries changelogs = [] missing_forthcoming =", "for p in packages.values()])) # fetch current version and verify", "not have a forthcoming section in their changelog file: %s'", "current date new_changelog_data = [] new_label = '%s (%s)' %", "from __future__ import print_function import argparse import datetime import docutils.core", "new_label) new_changelog_data.append((changelog_path, data)) print('Writing updated changelog files...') for (changelog_path, data)", "in their changelog file: %s' % ', '.join(sorted(missing_forthcoming)), file=sys.stderr) if", "if not forthcoming_label: missing_forthcoming.append(package.name) continue # check that new_version section", "already_tagged.append(package.name) continue except KeyError: pass changelogs.append((package.name, changelog_path, changelog, forthcoming_label)) if", "all packages have same version number old_version = verify_equal_package_versions(packages.values()) new_version", "raise RuntimeError('Could not find section') if count > 1: raise", "and isinstance(section.children[0], docutils.nodes.title): title = section.children[0] if title and len(title.children)", "section_char = match.group(2)[0] return new_label + '\\n' + 
section_char *", "', '.join([p.name for p in packages.values()])) # fetch current version", "exist yet try: changelog.get_content_of_version(new_version) already_tagged.append(package.name) continue except KeyError: pass changelogs.append((package.name,", "not os.path.exists(changelog_path): missing_forthcoming.append(package.name) continue changelog = get_changelog_from_path(changelog_path, package.name) if not", "+ section_char * len(new_label) pattern = '^(' + re.escape(old_label) +", "= None if isinstance(child, docutils.nodes.subtitle): title = child elif isinstance(child,", "(new_version, ', '.join(sorted(already_tagged))), file=sys.stderr) # rename forthcoming sections to new_version", "section '%s' to '%s' in package '%s'...\" % (forthcoming_label, new_label,", "= parser.parse_args(sysargs) base_path = '.' # find packages packages =", "new_changelog_data.append((changelog_path, data)) print('Writing updated changelog files...') for (changelog_path, data) in", "the forthcoming section in the changelog files with an upcoming", "packages.values()])) # fetch current version and verify that all packages", "% (new_version, ', '.join(sorted(already_tagged))), file=sys.stderr) # rename forthcoming sections to", "sections') return data def main(sysargs=None): parser = argparse.ArgumentParser(description='Tag the forthcoming", "count == 0: raise RuntimeError('Could not find section') if count", "= verify_equal_package_versions(packages.values()) new_version = bump_version(old_version, args.bump) print('Tag version %s' %", "= [] for pkg_path, package in packages.items(): changelog_path = os.path.join(base_path,", "'.join(sorted(already_tagged))), file=sys.stderr) # rename forthcoming sections to new_version including current", "document = docutils.core.publish_doctree(rst) forthcoming_label = None for child in document.children:", "new_label): valid_section_characters = '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~' def replace_section(match): section_char = match.group(2)[0] return", "and len(title.children) > 0 and isinstance(title.children[0], docutils.nodes.Text): title_text = title.children[0].rawsource", "files with an upcoming version number') parser.add_argument('--bump', choices=('major', 'minor', 'patch'),", "their changelog file: %s\" % (new_version, ', '.join(sorted(already_tagged))), file=sys.stderr) #", "forthcoming section exists forthcoming_label = get_forthcoming_label(changelog.rst) if not forthcoming_label: missing_forthcoming.append(package.name)", "get_forthcoming_label(rst): document = docutils.core.publish_doctree(rst) forthcoming_label = None for child in", "do already have a section '%s' in their changelog file:", "files with the upcoming version and the current date\"\"\" from", "data, flags=re.MULTILINE) if count == 0: raise RuntimeError('Could not find", "return data def main(sysargs=None): parser = argparse.ArgumentParser(description='Tag the forthcoming section", "'%s' in package '%s'...\" % (forthcoming_label, new_label, pkg_name)) data =", "new_version section does not exist yet try: changelog.get_content_of_version(new_version) already_tagged.append(package.name) continue", "title = None if isinstance(child, docutils.nodes.subtitle): title = child elif", "title.children[0].rawsource if FORTHCOMING_LABEL.lower() in title_text.lower(): if forthcoming_label: raise RuntimeError('Found multiple", "pkg_name)) data = rename_section(changelog.rst, forthcoming_label, new_label) new_changelog_data.append((changelog_path, data)) print('Writing updated", "%s' % new_version) # 
check for changelog entries changelogs =", "pkg_path, CHANGELOG_FILENAME) if not os.path.exists(changelog_path): missing_forthcoming.append(package.name) continue changelog = get_changelog_from_path(changelog_path,", "= child if len(section.children) > 0 and isinstance(section.children[0], docutils.nodes.title): title", "file: %s\" % (new_version, ', '.join(sorted(already_tagged))), file=sys.stderr) # rename forthcoming", "+ '\\n' + section_char * len(new_label) pattern = '^(' +", "if forthcoming_label: raise RuntimeError('Found multiple forthcoming sections') forthcoming_label = title_text", "bump_version(old_version, args.bump) print('Tag version %s' % new_version) # check for", "forthcoming section in their changelog file: %s' % ', '.join(sorted(missing_forthcoming)),", "new_version including current date new_changelog_data = [] new_label = '%s", "= None for child in document.children: title = None if", "old_version = verify_equal_package_versions(packages.values()) new_version = bump_version(old_version, args.bump) print('Tag version %s'", "data)) print('Writing updated changelog files...') for (changelog_path, data) in new_changelog_data:", "title and len(title.children) > 0 and isinstance(title.children[0], docutils.nodes.Text): title_text =", "packages: %s' % ', '.join([p.name for p in packages.values()])) #", "print('Tag version %s' % new_version) # check for changelog entries", "[] missing_forthcoming = [] already_tagged = [] for pkg_path, package", "from catkin_pkg.packages import find_packages, verify_equal_package_versions def get_forthcoming_label(rst): document = docutils.core.publish_doctree(rst)", "import CHANGELOG_FILENAME, get_changelog_from_path from catkin_pkg.changelog_generator import FORTHCOMING_LABEL from catkin_pkg.package_version import", "in packages.values()])) # fetch current version and verify that all", "section_char * len(new_label) pattern = '^(' + re.escape(old_label) + ')\\n(['", "in title_text.lower(): if forthcoming_label: raise RuntimeError('Found multiple forthcoming sections') forthcoming_label", "* len(new_label) pattern = '^(' + re.escape(old_label) + ')\\n([' +", "count > 1: raise RuntimeError('Found multiple matching sections') return data", "section = child if len(section.children) > 0 and isinstance(section.children[0], docutils.nodes.title):", "current version and verify that all packages have same version", "sections to new_version including current date new_changelog_data = [] new_label", "# check that forthcoming section exists forthcoming_label = get_forthcoming_label(changelog.rst) if", "part of the version number to bump? 
(default: %(default)s)') args", "not changelog: missing_forthcoming.append(package.name) continue # check that forthcoming section exists", "catkin_pkg.changelog import CHANGELOG_FILENAME, get_changelog_from_path from catkin_pkg.changelog_generator import FORTHCOMING_LABEL from catkin_pkg.package_version", "p in packages.values()])) # fetch current version and verify that", "def get_forthcoming_label(rst): document = docutils.core.publish_doctree(rst) forthcoming_label = None for child", "package in packages.items(): changelog_path = os.path.join(base_path, pkg_path, CHANGELOG_FILENAME) if not", "renames the forthcoming section in changelog files with the upcoming", "+ re.escape(valid_section_characters) + ']+)$' data, count = re.subn(pattern, replace_section, data,", "data, count = re.subn(pattern, replace_section, data, flags=re.MULTILINE) if count ==", "if missing_forthcoming: print('The following packages do not have a forthcoming", "new_label, pkg_name)) data = rename_section(changelog.rst, forthcoming_label, new_label) new_changelog_data.append((changelog_path, data)) print('Writing", "= '.' # find packages packages = find_packages(base_path) if not", "already have a section '%s' in their changelog file: %s\"", "get_changelog_from_path(changelog_path, package.name) if not changelog: missing_forthcoming.append(package.name) continue # check that", "missing_forthcoming.append(package.name) continue # check that new_version section does not exist", "fetch current version and verify that all packages have same", "[] new_label = '%s (%s)' % (new_version, datetime.date.today().isoformat()) for (pkg_name,", "get_forthcoming_label(changelog.rst) if not forthcoming_label: missing_forthcoming.append(package.name) continue # check that new_version", "number to bump? 
(default: %(default)s)') args = parser.parse_args(sysargs) base_path =", "= bump_version(old_version, args.bump) print('Tag version %s' % new_version) # check", "from catkin_pkg.changelog import CHANGELOG_FILENAME, get_changelog_from_path from catkin_pkg.changelog_generator import FORTHCOMING_LABEL from", "forthcoming_label: missing_forthcoming.append(package.name) continue # check that new_version section does not", "re import sys from catkin_pkg.changelog import CHANGELOG_FILENAME, get_changelog_from_path from catkin_pkg.changelog_generator", "[] already_tagged = [] for pkg_path, package in packages.items(): changelog_path", "return new_label + '\\n' + section_char * len(new_label) pattern =", "check that forthcoming section exists forthcoming_label = get_forthcoming_label(changelog.rst) if not", "version and verify that all packages have same version number", "verify that all packages have same version number old_version =", "'.join(sorted(missing_forthcoming)), file=sys.stderr) if already_tagged: print(\"The following packages do already have", "os.path.join(base_path, pkg_path, CHANGELOG_FILENAME) if not os.path.exists(changelog_path): missing_forthcoming.append(package.name) continue changelog =", "import find_packages, verify_equal_package_versions def get_forthcoming_label(rst): document = docutils.core.publish_doctree(rst) forthcoming_label =", "KeyError: pass changelogs.append((package.name, changelog_path, changelog, forthcoming_label)) if missing_forthcoming: print('The following", "packages do already have a section '%s' in their changelog", "import os import re import sys from catkin_pkg.changelog import CHANGELOG_FILENAME,", "print(\"The following packages do already have a section '%s' in", "in the changelog files with an upcoming version number') parser.add_argument('--bump',", "packages do not have a forthcoming section in their changelog", "changelog_path, changelog, forthcoming_label)) if missing_forthcoming: print('The following packages do not", "raise RuntimeError('Found multiple matching sections') return data def main(sysargs=None): parser", "datetime import docutils.core import os import re import sys from", "title = child elif isinstance(child, docutils.nodes.section): section = child if", "section '%s' in their changelog file: %s\" % (new_version, ',", "forthcoming_label, new_label) new_changelog_data.append((changelog_path, data)) print('Writing updated changelog files...') for (changelog_path,", "pattern = '^(' + re.escape(old_label) + ')\\n([' + re.escape(valid_section_characters) +", "+ ']+)$' data, count = re.subn(pattern, replace_section, data, flags=re.MULTILINE) if", "== 0: raise RuntimeError('Could not find section') if count >", "= [] missing_forthcoming = [] already_tagged = [] for pkg_path,", "and verify that all packages have same version number old_version", "'%s (%s)' % (new_version, datetime.date.today().isoformat()) for (pkg_name, changelog_path, changelog, forthcoming_label)", "changelog.get_content_of_version(new_version) already_tagged.append(package.name) continue except KeyError: pass changelogs.append((package.name, changelog_path, changelog, forthcoming_label))", "do not have a forthcoming section in their changelog file:", "'^(' + re.escape(old_label) + ')\\n([' + re.escape(valid_section_characters) + ']+)$' data,", "changelog files...') for (changelog_path, data) in new_changelog_data: with open(changelog_path, 'wb')", "isinstance(child, docutils.nodes.section): section = child if len(section.children) > 0 and", "'\\n' + section_char * 
len(new_label) pattern = '^(' + re.escape(old_label)", "sections') forthcoming_label = title_text return forthcoming_label def rename_section(data, old_label, new_label):", "forthcoming_label def rename_section(data, old_label, new_label): valid_section_characters = '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~' def replace_section(match):", "changelogs.append((package.name, changelog_path, changelog, forthcoming_label)) if missing_forthcoming: print('The following packages do", "section in their changelog file: %s' % ', '.join(sorted(missing_forthcoming)), file=sys.stderr)", "continue changelog = get_changelog_from_path(changelog_path, package.name) if not changelog: missing_forthcoming.append(package.name) continue", "version %s' % new_version) # check for changelog entries changelogs", "changelog entries changelogs = [] missing_forthcoming = [] already_tagged =", "verify_equal_package_versions def get_forthcoming_label(rst): document = docutils.core.publish_doctree(rst) forthcoming_label = None for", "1: raise RuntimeError('Found multiple matching sections') return data def main(sysargs=None):", "replace_section(match): section_char = match.group(2)[0] return new_label + '\\n' + section_char", "date new_changelog_data = [] new_label = '%s (%s)' % (new_version,", "0 and isinstance(title.children[0], docutils.nodes.Text): title_text = title.children[0].rawsource if FORTHCOMING_LABEL.lower() in", "found') print('Found packages: %s' % ', '.join([p.name for p in", "docutils.core.publish_doctree(rst) forthcoming_label = None for child in document.children: title =", "check that new_version section does not exist yet try: changelog.get_content_of_version(new_version)", "isinstance(child, docutils.nodes.subtitle): title = child elif isinstance(child, docutils.nodes.section): section =", "%s\" % (new_version, ', '.join(sorted(already_tagged))), file=sys.stderr) # rename forthcoming sections", "the upcoming version and the current date\"\"\" from __future__ import", "changelog_path = os.path.join(base_path, pkg_path, CHANGELOG_FILENAME) if not os.path.exists(changelog_path): missing_forthcoming.append(package.name) continue", "following packages do already have a section '%s' in their", "print_function import argparse import datetime import docutils.core import os import", "try: changelog.get_content_of_version(new_version) already_tagged.append(package.name) continue except KeyError: pass changelogs.append((package.name, changelog_path, changelog,", "re.escape(valid_section_characters) + ']+)$' data, count = re.subn(pattern, replace_section, data, flags=re.MULTILINE)", "packages: raise RuntimeError('No packages found') print('Found packages: %s' % ',", "title_text = title.children[0].rawsource if FORTHCOMING_LABEL.lower() in title_text.lower(): if forthcoming_label: raise", "= [] new_label = '%s (%s)' % (new_version, datetime.date.today().isoformat()) for", "rename forthcoming sections to new_version including current date new_changelog_data =" ]
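A quick self-contained illustration (not part of the script above; the sample changelog text and version label are invented for the example) of the heading rewrite that rename_section performs:

import re

SAMPLE_RST = 'Forthcoming\n-----------\n* Fixed a bug\n'
OLD_LABEL, NEW_LABEL = 'Forthcoming', '0.2.0 (2024-01-01)'
VALID_SECTION_CHARACTERS = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'


def _replace(match):
    # Re-draw the underline with the same character, sized to the new label.
    section_char = match.group(2)[0]
    return NEW_LABEL + '\n' + section_char * len(NEW_LABEL)


pattern = '^(' + re.escape(OLD_LABEL) + ')\n([' + re.escape(VALID_SECTION_CHARACTERS) + ']+)$'
print(re.sub(pattern, _replace, SAMPLE_RST, flags=re.MULTILINE))
# Prints:
# 0.2.0 (2024-01-01)
# ------------------
# * Fixed a bug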
[ "import Process import torch.nn as nn from machina.optims import DistributedAdamW", "DistributedAdamW def init_processes(rank, world_size, function, backend='tcp'): os.environ['MASTER_ADDR'] = '127.0.0.1' os.environ['MASTER_PORT']", "DistributedAdamW( model.parameters()) optimizer.zero_grad() loss = model(torch.ones(10).float()) loss.backward() optimizer.step() processes =", "processes = [] world_size = 4 for rank in range(world_size):", "[] world_size = 4 for rank in range(world_size): p =", "def test_step(self): def _run(rank, world_size): model = nn.Linear(10, 1) optimizer", "optimizer = DistributedAdamW( model.parameters()) optimizer.zero_grad() loss = model(torch.ones(10).float()) loss.backward() optimizer.step()", "= model(torch.ones(10).float()) loss.backward() optimizer.step() processes = [] world_size = 4", "def _run(rank, world_size): model = nn.Linear(10, 1) optimizer = DistributedAdamW(", "import os import unittest import torch import torch.distributed as dist", "import torch.nn as nn from machina.optims import DistributedAdamW def init_processes(rank,", "rank=rank, world_size=world_size) function(rank, world_size) class TestDistributedAdamW(unittest.TestCase): def test_step(self): def _run(rank,", "loss = model(torch.ones(10).float()) loss.backward() optimizer.step() processes = [] world_size =", "nn.Linear(10, 1) optimizer = DistributedAdamW( model.parameters()) optimizer.zero_grad() loss = model(torch.ones(10).float())", "= [] world_size = 4 for rank in range(world_size): p", "world_size) class TestDistributedAdamW(unittest.TestCase): def test_step(self): def _run(rank, world_size): model =", "in range(world_size): p = Process(target=init_processes, args=(rank, world_size, _run)) p.start() processes.append(p)", "test_step(self): def _run(rank, world_size): model = nn.Linear(10, 1) optimizer =", "= '127.0.0.1' os.environ['MASTER_PORT'] = '29500' dist.init_process_group(backend, rank=rank, world_size=world_size) function(rank, world_size)", "'127.0.0.1' os.environ['MASTER_PORT'] = '29500' dist.init_process_group(backend, rank=rank, world_size=world_size) function(rank, world_size) class", "rank in range(world_size): p = Process(target=init_processes, args=(rank, world_size, _run)) p.start()", "torch import torch.distributed as dist from torch.multiprocessing import Process import", "import DistributedAdamW def init_processes(rank, world_size, function, backend='tcp'): os.environ['MASTER_ADDR'] = '127.0.0.1'", "torch.multiprocessing import Process import torch.nn as nn from machina.optims import", "= nn.Linear(10, 1) optimizer = DistributedAdamW( model.parameters()) optimizer.zero_grad() loss =", "unittest import torch import torch.distributed as dist from torch.multiprocessing import", "as dist from torch.multiprocessing import Process import torch.nn as nn", "= DistributedAdamW( model.parameters()) optimizer.zero_grad() loss = model(torch.ones(10).float()) loss.backward() optimizer.step() processes", "= Process(target=init_processes, args=(rank, world_size, _run)) p.start() processes.append(p) for p in", "optimizer.zero_grad() loss = model(torch.ones(10).float()) loss.backward() optimizer.step() processes = [] world_size", "os import unittest import torch import torch.distributed as dist from", "as nn from machina.optims import DistributedAdamW def init_processes(rank, world_size, function,", "dist.init_process_group(backend, rank=rank, world_size=world_size) function(rank, world_size) class TestDistributedAdamW(unittest.TestCase): def test_step(self): def", "import torch 
import torch.distributed as dist from torch.multiprocessing import Process", "Process import torch.nn as nn from machina.optims import DistributedAdamW def", "machina.optims import DistributedAdamW def init_processes(rank, world_size, function, backend='tcp'): os.environ['MASTER_ADDR'] =", "os.environ['MASTER_PORT'] = '29500' dist.init_process_group(backend, rank=rank, world_size=world_size) function(rank, world_size) class TestDistributedAdamW(unittest.TestCase):", "nn from machina.optims import DistributedAdamW def init_processes(rank, world_size, function, backend='tcp'):", "import torch.distributed as dist from torch.multiprocessing import Process import torch.nn", "'29500' dist.init_process_group(backend, rank=rank, world_size=world_size) function(rank, world_size) class TestDistributedAdamW(unittest.TestCase): def test_step(self):", "model.parameters()) optimizer.zero_grad() loss = model(torch.ones(10).float()) loss.backward() optimizer.step() processes = []", "model(torch.ones(10).float()) loss.backward() optimizer.step() processes = [] world_size = 4 for", "init_processes(rank, world_size, function, backend='tcp'): os.environ['MASTER_ADDR'] = '127.0.0.1' os.environ['MASTER_PORT'] = '29500'", "= 4 for rank in range(world_size): p = Process(target=init_processes, args=(rank,", "args=(rank, world_size, _run)) p.start() processes.append(p) for p in processes: p.join()", "function(rank, world_size) class TestDistributedAdamW(unittest.TestCase): def test_step(self): def _run(rank, world_size): model", "def init_processes(rank, world_size, function, backend='tcp'): os.environ['MASTER_ADDR'] = '127.0.0.1' os.environ['MASTER_PORT'] =", "class TestDistributedAdamW(unittest.TestCase): def test_step(self): def _run(rank, world_size): model = nn.Linear(10,", "from machina.optims import DistributedAdamW def init_processes(rank, world_size, function, backend='tcp'): os.environ['MASTER_ADDR']", "function, backend='tcp'): os.environ['MASTER_ADDR'] = '127.0.0.1' os.environ['MASTER_PORT'] = '29500' dist.init_process_group(backend, rank=rank,", "world_size): model = nn.Linear(10, 1) optimizer = DistributedAdamW( model.parameters()) optimizer.zero_grad()", "TestDistributedAdamW(unittest.TestCase): def test_step(self): def _run(rank, world_size): model = nn.Linear(10, 1)", "for rank in range(world_size): p = Process(target=init_processes, args=(rank, world_size, _run))", "world_size=world_size) function(rank, world_size) class TestDistributedAdamW(unittest.TestCase): def test_step(self): def _run(rank, world_size):", "loss.backward() optimizer.step() processes = [] world_size = 4 for rank", "world_size = 4 for rank in range(world_size): p = Process(target=init_processes,", "world_size, function, backend='tcp'): os.environ['MASTER_ADDR'] = '127.0.0.1' os.environ['MASTER_PORT'] = '29500' dist.init_process_group(backend,", "os.environ['MASTER_ADDR'] = '127.0.0.1' os.environ['MASTER_PORT'] = '29500' dist.init_process_group(backend, rank=rank, world_size=world_size) function(rank,", "import unittest import torch import torch.distributed as dist from torch.multiprocessing", "= '29500' dist.init_process_group(backend, rank=rank, world_size=world_size) function(rank, world_size) class TestDistributedAdamW(unittest.TestCase): def", "1) optimizer = DistributedAdamW( model.parameters()) optimizer.zero_grad() loss = model(torch.ones(10).float()) loss.backward()", "range(world_size): p = Process(target=init_processes, args=(rank, world_size, _run)) p.start() processes.append(p) for", "optimizer.step() processes = [] 
world_size = 4 for rank in", "4 for rank in range(world_size): p = Process(target=init_processes, args=(rank, world_size,", "_run(rank, world_size): model = nn.Linear(10, 1) optimizer = DistributedAdamW( model.parameters())", "backend='tcp'): os.environ['MASTER_ADDR'] = '127.0.0.1' os.environ['MASTER_PORT'] = '29500' dist.init_process_group(backend, rank=rank, world_size=world_size)", "model = nn.Linear(10, 1) optimizer = DistributedAdamW( model.parameters()) optimizer.zero_grad() loss", "torch.distributed as dist from torch.multiprocessing import Process import torch.nn as", "Process(target=init_processes, args=(rank, world_size, _run)) p.start() processes.append(p) for p in processes:", "dist from torch.multiprocessing import Process import torch.nn as nn from", "p = Process(target=init_processes, args=(rank, world_size, _run)) p.start() processes.append(p) for p", "torch.nn as nn from machina.optims import DistributedAdamW def init_processes(rank, world_size,", "from torch.multiprocessing import Process import torch.nn as nn from machina.optims" ]
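Note that the 'tcp' backend used above has been removed from torch.distributed in recent PyTorch releases; on newer versions one would typically pass backend='gloo' for a CPU-only run. A minimal guard like the following (not present in the original file, shown only as a usage sketch) lets the test module be executed directly:

if __name__ == '__main__':
    # Runs TestDistributedAdamW.test_step via the standard unittest runner.
    unittest.main()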
[ "* from rsqueakvm.model.compiled_methods import * # from rsqueakvm.model.display import *", "from rsqueakvm.model.character import * from rsqueakvm.model.compiled_methods import * # from", "import * from rsqueakvm.model.pointers import * from rsqueakvm.model.variable import *", "W_MutableSmallInteger W_AbstractObjectWithIdentityHash W_AbstractFloat W_Float W_MutableFloat W_Character W_PointersObject W_AbstractObjectWithClassReference W_LargeInteger W_LargeIntegerWord", "from rsqueakvm.model.numeric import * from rsqueakvm.model.pointers import * from rsqueakvm.model.variable", "W_LargeIntegerBig W_BytesObject W_WordsObject W_CompiledMethod W_SpurCompiledMethod W_PreSpurCompiledMethod \"\"\" from rsqueakvm.model.base import", "rsqueakvm.model.character import * from rsqueakvm.model.compiled_methods import * # from rsqueakvm.model.display", "rsqueakvm.model.numeric import * from rsqueakvm.model.pointers import * from rsqueakvm.model.variable import", "<reponame>shiplift/RSqueakOnABoat<filename>rsqueakvm/model/__init__.py \"\"\" Squeak model. W_Object W_SmallInteger W_MutableSmallInteger W_AbstractObjectWithIdentityHash W_AbstractFloat W_Float", "import * from rsqueakvm.model.numeric import * from rsqueakvm.model.pointers import *", "W_SmallInteger W_MutableSmallInteger W_AbstractObjectWithIdentityHash W_AbstractFloat W_Float W_MutableFloat W_Character W_PointersObject W_AbstractObjectWithClassReference W_LargeInteger", "rsqueakvm.model.display import * from rsqueakvm.model.numeric import * from rsqueakvm.model.pointers import", "model. W_Object W_SmallInteger W_MutableSmallInteger W_AbstractObjectWithIdentityHash W_AbstractFloat W_Float W_MutableFloat W_Character W_PointersObject", "W_Object W_SmallInteger W_MutableSmallInteger W_AbstractObjectWithIdentityHash W_AbstractFloat W_Float W_MutableFloat W_Character W_PointersObject W_AbstractObjectWithClassReference", "import * # from rsqueakvm.model.display import * from rsqueakvm.model.numeric import", "\"\"\" from rsqueakvm.model.base import * from rsqueakvm.model.character import * from", "W_LargeIntegerWord W_LargeIntegerBig W_BytesObject W_WordsObject W_CompiledMethod W_SpurCompiledMethod W_PreSpurCompiledMethod \"\"\" from rsqueakvm.model.base", "import * from rsqueakvm.model.compiled_methods import * # from rsqueakvm.model.display import", "from rsqueakvm.model.base import * from rsqueakvm.model.character import * from rsqueakvm.model.compiled_methods", "Squeak model. 
W_Object W_SmallInteger W_MutableSmallInteger W_AbstractObjectWithIdentityHash W_AbstractFloat W_Float W_MutableFloat W_Character", "W_Float W_MutableFloat W_Character W_PointersObject W_AbstractObjectWithClassReference W_LargeInteger W_LargeIntegerWord W_LargeIntegerBig W_BytesObject W_WordsObject", "W_WordsObject W_CompiledMethod W_SpurCompiledMethod W_PreSpurCompiledMethod \"\"\" from rsqueakvm.model.base import * from", "import * from rsqueakvm.model.character import * from rsqueakvm.model.compiled_methods import *", "W_BytesObject W_WordsObject W_CompiledMethod W_SpurCompiledMethod W_PreSpurCompiledMethod \"\"\" from rsqueakvm.model.base import *", "W_AbstractObjectWithIdentityHash W_AbstractFloat W_Float W_MutableFloat W_Character W_PointersObject W_AbstractObjectWithClassReference W_LargeInteger W_LargeIntegerWord W_LargeIntegerBig", "* from rsqueakvm.model.numeric import * from rsqueakvm.model.pointers import * from", "W_CompiledMethod W_SpurCompiledMethod W_PreSpurCompiledMethod \"\"\" from rsqueakvm.model.base import * from rsqueakvm.model.character", "W_AbstractObjectWithClassReference W_LargeInteger W_LargeIntegerWord W_LargeIntegerBig W_BytesObject W_WordsObject W_CompiledMethod W_SpurCompiledMethod W_PreSpurCompiledMethod \"\"\"", "* # from rsqueakvm.model.display import * from rsqueakvm.model.numeric import *", "W_AbstractFloat W_Float W_MutableFloat W_Character W_PointersObject W_AbstractObjectWithClassReference W_LargeInteger W_LargeIntegerWord W_LargeIntegerBig W_BytesObject", "W_LargeInteger W_LargeIntegerWord W_LargeIntegerBig W_BytesObject W_WordsObject W_CompiledMethod W_SpurCompiledMethod W_PreSpurCompiledMethod \"\"\" from", "# from rsqueakvm.model.display import * from rsqueakvm.model.numeric import * from", "rsqueakvm.model.base import * from rsqueakvm.model.character import * from rsqueakvm.model.compiled_methods import", "W_PreSpurCompiledMethod \"\"\" from rsqueakvm.model.base import * from rsqueakvm.model.character import *", "rsqueakvm.model.compiled_methods import * # from rsqueakvm.model.display import * from rsqueakvm.model.numeric", "\"\"\" Squeak model. W_Object W_SmallInteger W_MutableSmallInteger W_AbstractObjectWithIdentityHash W_AbstractFloat W_Float W_MutableFloat", "from rsqueakvm.model.compiled_methods import * # from rsqueakvm.model.display import * from", "W_MutableFloat W_Character W_PointersObject W_AbstractObjectWithClassReference W_LargeInteger W_LargeIntegerWord W_LargeIntegerBig W_BytesObject W_WordsObject W_CompiledMethod", "W_Character W_PointersObject W_AbstractObjectWithClassReference W_LargeInteger W_LargeIntegerWord W_LargeIntegerBig W_BytesObject W_WordsObject W_CompiledMethod W_SpurCompiledMethod", "W_SpurCompiledMethod W_PreSpurCompiledMethod \"\"\" from rsqueakvm.model.base import * from rsqueakvm.model.character import", "* from rsqueakvm.model.character import * from rsqueakvm.model.compiled_methods import * #", "W_PointersObject W_AbstractObjectWithClassReference W_LargeInteger W_LargeIntegerWord W_LargeIntegerBig W_BytesObject W_WordsObject W_CompiledMethod W_SpurCompiledMethod W_PreSpurCompiledMethod", "from rsqueakvm.model.display import * from rsqueakvm.model.numeric import * from rsqueakvm.model.pointers" ]
[ "out, label): assert not label.requires_grad # out shape batch_size x", "def forward(self, out, label, ignore_label=255): assert not label.requires_grad mask =", "label): assert not label.requires_grad # out shape batch_size x channels", "= nn.NLLLoss(ignore_index=255) def forward(self, out, label): assert not label.requires_grad #", "self).__init__() self.size_average = size_average self.batch_average = batch_average def forward(self, output,", "loss_pos + loss_neg if self.size_average: final_loss /= float(np.prod(label.size())) elif self.batch_average:", "torch.nn as nn import torch.nn.functional as F from torch.nn.modules.module import", "void_pixels=None): assert (output.size() == label.size()) labels = torch.ge(label, 0.5).float() #", "= pos_weight def forward(self, output, label, void_pixels=None): assert (output.size() ==", "torch.log( 1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero))) loss_pos_pix", "default is HED-style if self.pos_weight is None: num_labels_pos = torch.sum(labels)", "batch_average def forward(self, output, label, void_pixels=None): assert (output.size() == label.size())", "normalization for surface normals \"\"\" def __init__(self, size_average=True, normalize=False, norm=1):", "num_total = num_labels_pos + num_labels_neg w = num_labels_neg / num_total", "= torch.ge(label, 0.5).float() # Weighting of the loss, default is", "torch.ge(void_pixels, 0.5).float().sum() w = num_labels_neg / num_total loss_pos = torch.sum(loss_pos_pix)", "qn = torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) + 1e-12 top = bottom.div(qn)", "\"\"\" def __init__(self, size_average=True, batch_average=True): super(BinaryCrossEntropyLoss, self).__init__() self.size_average = size_average", "loss for semantic segmentation \"\"\" def __init__(self): super(SoftMaxwithLoss, self).__init__() self.softmax", "DepthLoss'.format(loss)) def forward(self, out, label): mask = (label != 255)", "self.loss = nn.L1Loss() else: raise NotImplementedError('Loss {} currently not supported", "size_average=True, batch_average=True, pos_weight=None): super(BalancedCrossEntropyLoss, self).__init__() self.size_average = size_average self.batch_average =", "is not None: w_void = torch.le(void_pixels, 0.5).float() loss_pos_pix = torch.mul(w_void,", "self.batch_average: final_loss /= label.size()[0] return final_loss class DepthLoss(nn.Module): \"\"\" Loss", "self.loss_func(torch.masked_select(out_norm, mask), torch.masked_select(label, mask), reduction='sum') else: loss = self.loss_func(torch.masked_select(out, mask),", "not None: out_norm = self.normalize(out) loss = self.loss_func(torch.masked_select(out_norm, mask), torch.masked_select(label,", "forward(self, bottom): qn = torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) + 1e-12 top", "return loss class BalancedCrossEntropyLoss(Module): \"\"\" Balanced Cross Entropy Loss with", "torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix) final_loss = loss_pos + loss_neg if", "label, void_pixels=None): assert (output.size() == label.size()) labels = torch.ge(label, 0.5).float()", "void_pixels=None): assert (output.size() == label.size()) labels = torch.ge(label, 0.5).float() output_gt_zero", "+ torch.exp(output - 2 * torch.mul(output, output_gt_zero))) loss_pos_pix = -torch.mul(labels,", "= None if norm == 1: print('Using L1 loss for", "label shape batch_size x 1 x h x w label", "regions, not balanced. 
\"\"\" def __init__(self, size_average=True, batch_average=True): super(BinaryCrossEntropyLoss, self).__init__()", "- labels, loss_val) if void_pixels is not None and not", "not self.pos_weight: w_void = torch.le(void_pixels, 0.5).float() loss_pos_pix = torch.mul(w_void, loss_pos_pix)", "label) return loss class BalancedCrossEntropyLoss(Module): \"\"\" Balanced Cross Entropy Loss", "forward(self, output, label, void_pixels=None): assert (output.size() == label.size()) labels =", "class BinaryCrossEntropyLoss(Module): \"\"\" Binary Cross Entropy with ignore regions, not", "for surface normals') self.loss_func = F.l1_loss elif norm == 2:", "loss = self.loss_func(torch.masked_select(out_norm, mask), torch.masked_select(label, mask), reduction='sum') else: loss =", "self.softmax = nn.LogSoftmax(dim=1) self.criterion = nn.NLLLoss(ignore_index=255) def forward(self, out, label):", "is not None and not self.pos_weight: w_void = torch.le(void_pixels, 0.5).float()", "<filename>Multi-Task-Learning-PyTorch-master/losses/loss_functions.py # This code is referenced from # https://github.com/facebookresearch/astmt/ #", "not label.requires_grad # out shape batch_size x channels x h", "ignore_label) n_valid = torch.sum(mask).item() if self.normalize is not None: out_norm", "torch.exp(output - 2 * torch.mul(output, output_gt_zero))) loss_pos_pix = -torch.mul(labels, loss_val)", "Entropy with ignore regions, not balanced. \"\"\" def __init__(self, size_average=True,", "raise NotImplementedError('Loss {} currently not supported in DepthLoss'.format(loss)) def forward(self,", "torch.mul(w_void, loss_neg_pix) num_total = num_total - torch.ge(void_pixels, 0.5).float().sum() w =", "self.normalize = Normalize() else: self.normalize = None if norm ==", "default L1 loss is used. \"\"\" def __init__(self, loss='l1'): super(DepthLoss,", "n_valid = torch.sum(mask).item() if self.normalize is not None: out_norm =", "nn.L1Loss() else: raise NotImplementedError('Loss {} currently not supported in DepthLoss'.format(loss))", "return final_loss class DepthLoss(nn.Module): \"\"\" Loss for depth prediction. By", "def forward(self, bottom): qn = torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) + 1e-12", "Entropy Loss with optional ignore regions \"\"\" def __init__(self, size_average=True,", "top class NormalsLoss(Module): \"\"\" L1 loss with ignore labels normalize:", "loss for surface normals') self.loss_func = F.mse_loss else: raise NotImplementedError", "loss, default is HED-style if self.pos_weight is None: num_labels_pos =", "* torch.mul(output, output_gt_zero))) loss_pos_pix = -torch.mul(labels, loss_val) loss_neg_pix = -torch.mul(1.0", "x w label = label[:, 0, :, :].long() loss =", "segmentation \"\"\" def __init__(self): super(SoftMaxwithLoss, self).__init__() self.softmax = nn.LogSoftmax(dim=1) self.criterion", "output, label, void_pixels=None): assert (output.size() == label.size()) labels = torch.ge(label,", "- torch.ge(void_pixels, 0.5).float().sum() w = num_labels_neg / num_total loss_pos =", "loss with ignore labels normalize: normalization for surface normals \"\"\"", "h x w # label shape batch_size x 1 x", "= -torch.mul(labels, loss_val) loss_neg_pix = -torch.mul(1.0 - labels, loss_val) if", "self.pos_weight = pos_weight def forward(self, output, label, void_pixels=None): assert (output.size()", "!= ignore_label) n_valid = torch.sum(mask).item() if self.normalize is not None:", "from # https://github.com/facebookresearch/astmt/ # # Copyright (c) Facebook, Inc. 
and", "- 2 * torch.mul(output, output_gt_zero))) loss_pos_pix = -torch.mul(labels, loss_val) loss_neg_pix", "= torch.mul(w_void, loss_neg_pix) loss_pos = torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix) final_loss", "(output.size() == label.size()) labels = torch.ge(label, 0.5).float() # Weighting of", "torch.nn.functional as F from torch.nn.modules.module import Module import numpy as", "== label.size()) labels = torch.ge(label, 0.5).float() output_gt_zero = torch.ge(output, 0).float()", "mask = (label != 255) return self.loss(torch.masked_select(out, mask), torch.masked_select(label, mask))", "loss_pos_pix = -torch.mul(labels, loss_val) loss_neg_pix = -torch.mul(1.0 - labels, loss_val)", "import torch import torch.nn as nn import torch.nn.functional as F", "https://github.com/facebookresearch/astmt/ # # Copyright (c) Facebook, Inc. and its affiliates.", "size_average if normalize: self.normalize = Normalize() else: self.normalize = None", "0.5).float() # Weighting of the loss, default is HED-style if", "currently not supported in DepthLoss'.format(loss)) def forward(self, out, label): mask", "loss_neg if self.size_average: final_loss /= float(np.prod(label.size())) elif self.batch_average: final_loss /=", "function returns cross entropy loss for semantic segmentation \"\"\" def", "torch.le(void_pixels, 0.5).float() loss_pos_pix = torch.mul(w_void, loss_pos_pix) loss_neg_pix = torch.mul(w_void, loss_neg_pix)", "None: out_norm = self.normalize(out) loss = self.loss_func(torch.masked_select(out_norm, mask), torch.masked_select(label, mask),", "num_total else: w = self.pos_weight output_gt_zero = torch.ge(output, 0).float() loss_val", "returns cross entropy loss for semantic segmentation \"\"\" def __init__(self):", "size_average self.batch_average = batch_average self.pos_weight = pos_weight def forward(self, output,", "if self.pos_weight is None: num_labels_pos = torch.sum(labels) num_labels_neg = torch.sum(1.0", "== 'l1': self.loss = nn.L1Loss() else: raise NotImplementedError('Loss {} currently", "supported in DepthLoss'.format(loss)) def forward(self, out, label): mask = (label", "labels normalize: normalization for surface normals \"\"\" def __init__(self, size_average=True,", "ignore labels normalize: normalization for surface normals \"\"\" def __init__(self,", "loss_pos_pix = torch.mul(w_void, loss_pos_pix) loss_neg_pix = torch.mul(w_void, loss_neg_pix) loss_pos =", "final_loss /= label.size()[0] return final_loss class DepthLoss(nn.Module): \"\"\" Loss for", "label[:, 0, :, :].long() loss = self.criterion(self.softmax(out), label) return loss", "labels, loss_val) if void_pixels is not None and not self.pos_weight:", "if norm == 1: print('Using L1 loss for surface normals')", "depth prediction. By default L1 loss is used. 
\"\"\" def", "__init__(self, size_average=True, batch_average=True): super(BinaryCrossEntropyLoss, self).__init__() self.size_average = size_average self.batch_average =", "/= float(np.prod(label.size())) elif self.batch_average: final_loss /= label.size()[0] return final_loss class", "x h x w # label shape batch_size x 1", "(label != ignore_label) n_valid = torch.sum(mask).item() if self.normalize is not", "- output_gt_zero)) - torch.log( 1 + torch.exp(output - 2 *", "This function returns cross entropy loss for semantic segmentation \"\"\"", "out, label): mask = (label != 255) return self.loss(torch.masked_select(out, mask),", "= self.pos_weight output_gt_zero = torch.ge(output, 0).float() loss_val = torch.mul(output, (labels", "= size_average self.batch_average = batch_average def forward(self, output, label, void_pixels=None):", "bottom): qn = torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) + 1e-12 top =", "License: Attribution-NonCommercial 4.0 International import torch import torch.nn as nn", "= torch.sum(labels) num_labels_neg = torch.sum(1.0 - labels) num_total = num_labels_pos", "0.5).float() output_gt_zero = torch.ge(output, 0).float() loss_val = torch.mul(output, (labels -", "return self.loss(torch.masked_select(out, mask), torch.masked_select(label, mask)) class Normalize(nn.Module): def __init__(self): super(Normalize,", "if void_pixels is not None: w_void = torch.le(void_pixels, 0.5).float() loss_pos_pix", "batch_average self.pos_weight = pos_weight def forward(self, output, label, void_pixels=None): assert", "w_void = torch.le(void_pixels, 0.5).float() loss_pos_pix = torch.mul(w_void, loss_pos_pix) loss_neg_pix =", "= num_labels_neg / num_total else: w = self.pos_weight output_gt_zero =", "Loss for depth prediction. By default L1 loss is used.", "== 2: print('Using L2 loss for surface normals') self.loss_func =", "from torch.nn.modules.module import Module import numpy as np class SoftMaxwithLoss(Module):", "x 1 x h x w label = label[:, 0,", "NotImplementedError('Loss {} currently not supported in DepthLoss'.format(loss)) def forward(self, out,", "code is referenced from # https://github.com/facebookresearch/astmt/ # # Copyright (c)", "referenced from # https://github.com/facebookresearch/astmt/ # # Copyright (c) Facebook, Inc.", "self.criterion(self.softmax(out), label) return loss class BalancedCrossEntropyLoss(Module): \"\"\" Balanced Cross Entropy", "void_pixels is not None and not self.pos_weight: w_void = torch.le(void_pixels,", "loss = self.criterion(self.softmax(out), label) return loss class BalancedCrossEntropyLoss(Module): \"\"\" Balanced", "ignore_label: ret_loss = torch.div(loss, max(n_valid, 1e-6)) return ret_loss else: ret_loss", "label.size()[0] return final_loss class BinaryCrossEntropyLoss(Module): \"\"\" Binary Cross Entropy with", "dim=1).unsqueeze(dim=1) + 1e-12 top = bottom.div(qn) return top class NormalsLoss(Module):", "= torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix) final_loss = loss_pos + loss_neg", "# License: Attribution-NonCommercial 4.0 International import torch import torch.nn as", "w # label shape batch_size x 1 x h x", "nn import torch.nn.functional as F from torch.nn.modules.module import Module import", "for depth prediction. By default L1 loss is used. 
\"\"\"", "# Weighting of the loss, default is HED-style if self.pos_weight", "if self.normalize is not None: out_norm = self.normalize(out) loss =", "if ignore_label: ret_loss = torch.div(loss, max(n_valid, 1e-6)) return ret_loss else:", "ignore regions \"\"\" def __init__(self, size_average=True, batch_average=True, pos_weight=None): super(BalancedCrossEntropyLoss, self).__init__()", "def forward(self, out, label): mask = (label != 255) return", "num_total loss_pos = torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix) final_loss = w", "1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero))) loss_pos_pix =", "self).__init__() if loss == 'l1': self.loss = nn.L1Loss() else: raise", "self.pos_weight: w_void = torch.le(void_pixels, 0.5).float() loss_pos_pix = torch.mul(w_void, loss_pos_pix) loss_neg_pix", "x channels x h x w # label shape batch_size", "super(Normalize, self).__init__() def forward(self, bottom): qn = torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1)", "with optional ignore regions \"\"\" def __init__(self, size_average=True, batch_average=True, pos_weight=None):", "= (label != 255) return self.loss(torch.masked_select(out, mask), torch.masked_select(label, mask)) class", "reduction='sum') else: loss = self.loss_func(torch.masked_select(out, mask), torch.masked_select(label, mask), reduction='sum') if", "BalancedCrossEntropyLoss(Module): \"\"\" Balanced Cross Entropy Loss with optional ignore regions", "batch_average=True, pos_weight=None): super(BalancedCrossEntropyLoss, self).__init__() self.size_average = size_average self.batch_average = batch_average", "= num_labels_neg / num_total loss_pos = torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix)", "class NormalsLoss(Module): \"\"\" L1 loss with ignore labels normalize: normalization", "shape batch_size x channels x h x w # label", "= batch_average def forward(self, output, label, void_pixels=None): assert (output.size() ==", "and its affiliates. # All rights reserved. # # License:", "Normalize(nn.Module): def __init__(self): super(Normalize, self).__init__() def forward(self, bottom): qn =", "= torch.mul(output, (labels - output_gt_zero)) - torch.log( 1 + torch.exp(output", "= (label != ignore_label) n_valid = torch.sum(mask).item() if self.normalize is", "= size_average if normalize: self.normalize = Normalize() else: self.normalize =", "loss is used. \"\"\" def __init__(self, loss='l1'): super(DepthLoss, self).__init__() if", "= -torch.mul(1.0 - labels, loss_val) if void_pixels is not None:", "else: raise NotImplementedError def forward(self, out, label, ignore_label=255): assert not", "= num_labels_pos + num_labels_neg w = num_labels_neg / num_total else:", "super(SoftMaxwithLoss, self).__init__() self.softmax = nn.LogSoftmax(dim=1) self.criterion = nn.NLLLoss(ignore_index=255) def forward(self,", "= -torch.mul(1.0 - labels, loss_val) if void_pixels is not None", "# This code is referenced from # https://github.com/facebookresearch/astmt/ # #", "All rights reserved. 
# # License: Attribution-NonCommercial 4.0 International import", "self.normalize(out) loss = self.loss_func(torch.masked_select(out_norm, mask), torch.masked_select(label, mask), reduction='sum') else: loss", "torch.div(loss, max(n_valid, 1e-6)) return ret_loss else: ret_loss = torch.div(loss, float(np.prod(label.size())))", "label.size()) labels = torch.ge(label, 0.5).float() output_gt_zero = torch.ge(output, 0).float() loss_val", "= bottom.div(qn) return top class NormalsLoss(Module): \"\"\" L1 loss with", "def forward(self, output, label, void_pixels=None): assert (output.size() == label.size()) labels", "in DepthLoss'.format(loss)) def forward(self, out, label): mask = (label !=", "torch.mul(w_void, loss_pos_pix) loss_neg_pix = torch.mul(w_void, loss_neg_pix) num_total = num_total -", "out, label, ignore_label=255): assert not label.requires_grad mask = (label !=", "torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix) final_loss = w * loss_pos +", "= self.criterion(self.softmax(out), label) return loss class BalancedCrossEntropyLoss(Module): \"\"\" Balanced Cross", "__init__(self): super(Normalize, self).__init__() def forward(self, bottom): qn = torch.norm(bottom, p=2,", ":].long() loss = self.criterion(self.softmax(out), label) return loss class BalancedCrossEntropyLoss(Module): \"\"\"", "'l1': self.loss = nn.L1Loss() else: raise NotImplementedError('Loss {} currently not", "loss class BalancedCrossEntropyLoss(Module): \"\"\" Balanced Cross Entropy Loss with optional", "+ num_labels_neg w = num_labels_neg / num_total else: w =", "\"\"\" Loss for depth prediction. By default L1 loss is", "= self.loss_func(torch.masked_select(out, mask), torch.masked_select(label, mask), reduction='sum') if self.size_average: if ignore_label:", "cross entropy loss for semantic segmentation \"\"\" def __init__(self): super(SoftMaxwithLoss,", "for surface normals') self.loss_func = F.mse_loss else: raise NotImplementedError def", "loss for surface normals') self.loss_func = F.l1_loss elif norm ==", "loss == 'l1': self.loss = nn.L1Loss() else: raise NotImplementedError('Loss {}", "x w # label shape batch_size x 1 x h", "loss='l1'): super(DepthLoss, self).__init__() if loss == 'l1': self.loss = nn.L1Loss()", "NormalsLoss(Module): \"\"\" L1 loss with ignore labels normalize: normalization for", "with ignore labels normalize: normalization for surface normals \"\"\" def", "loss_pos = torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix) final_loss = w *", "import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.module", "optional ignore regions \"\"\" def __init__(self, size_average=True, batch_average=True, pos_weight=None): super(BalancedCrossEntropyLoss,", "torch.mul(output, output_gt_zero))) loss_pos_pix = -torch.mul(labels, loss_val) loss_neg_pix = -torch.mul(1.0 -", "= torch.mul(w_void, loss_pos_pix) loss_neg_pix = torch.mul(w_void, loss_neg_pix) loss_pos = torch.sum(loss_pos_pix)", "= size_average self.batch_average = batch_average self.pos_weight = pos_weight def forward(self,", "not None and not self.pos_weight: w_void = torch.le(void_pixels, 0.5).float() loss_pos_pix", "ret_loss else: ret_loss = torch.div(loss, float(np.prod(label.size()))) return ret_loss return loss", "not None: w_void = torch.le(void_pixels, 0.5).float() loss_pos_pix = torch.mul(w_void, loss_pos_pix)", "HED-style if self.pos_weight is None: num_labels_pos = torch.sum(labels) num_labels_neg =", "0.5).float().sum() w = num_labels_neg / num_total loss_pos = torch.sum(loss_pos_pix) loss_neg", 
"float(np.prod(label.size())) elif self.batch_average: final_loss /= label.size()[0] return final_loss class BinaryCrossEntropyLoss(Module):", "By default L1 loss is used. \"\"\" def __init__(self, loss='l1'):", "loss_pos = torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix) final_loss = loss_pos +", "its affiliates. # All rights reserved. # # License: Attribution-NonCommercial", "= num_total - torch.ge(void_pixels, 0.5).float().sum() w = num_labels_neg / num_total", "__init__(self, size_average=True, batch_average=True, pos_weight=None): super(BalancedCrossEntropyLoss, self).__init__() self.size_average = size_average self.batch_average", "super(NormalsLoss, self).__init__() self.size_average = size_average if normalize: self.normalize = Normalize()", "loss_neg_pix) num_total = num_total - torch.ge(void_pixels, 0.5).float().sum() w = num_labels_neg", "BinaryCrossEntropyLoss(Module): \"\"\" Binary Cross Entropy with ignore regions, not balanced.", "normalize: normalization for surface normals \"\"\" def __init__(self, size_average=True, normalize=False,", "1e-12 top = bottom.div(qn) return top class NormalsLoss(Module): \"\"\" L1", "= torch.ge(label, 0.5).float() output_gt_zero = torch.ge(output, 0).float() loss_val = torch.mul(output,", "w label = label[:, 0, :, :].long() loss = self.criterion(self.softmax(out),", "label.requires_grad mask = (label != ignore_label) n_valid = torch.sum(mask).item() if", "# # License: Attribution-NonCommercial 4.0 International import torch import torch.nn", "numpy as np class SoftMaxwithLoss(Module): \"\"\" This function returns cross", "loss_neg_pix = -torch.mul(1.0 - labels, loss_val) if void_pixels is not", "w) * loss_neg if self.size_average: final_loss /= float(np.prod(label.size())) elif self.batch_average:", "if loss == 'l1': self.loss = nn.L1Loss() else: raise NotImplementedError('Loss", "self.loss_func(torch.masked_select(out, mask), torch.masked_select(label, mask), reduction='sum') if self.size_average: if ignore_label: ret_loss", "loss_pos_pix) loss_neg_pix = torch.mul(w_void, loss_neg_pix) loss_pos = torch.sum(loss_pos_pix) loss_neg =", "* loss_pos + (1 - w) * loss_neg if self.size_average:", "+ (1 - w) * loss_neg if self.size_average: final_loss /=", "output_gt_zero)) - torch.log( 1 + torch.exp(output - 2 * torch.mul(output,", "rights reserved. # # License: Attribution-NonCommercial 4.0 International import torch", "with ignore regions, not balanced. 
\"\"\" def __init__(self, size_average=True, batch_average=True):", "loss_neg = torch.sum(loss_neg_pix) final_loss = w * loss_pos + (1", "if self.size_average: final_loss /= float(np.prod(label.size())) elif self.batch_average: final_loss /= label.size()[0]", "torch.masked_select(label, mask), reduction='sum') if self.size_average: if ignore_label: ret_loss = torch.div(loss,", "loss_neg_pix) loss_pos = torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix) final_loss = loss_pos", "return final_loss class BinaryCrossEntropyLoss(Module): \"\"\" Binary Cross Entropy with ignore", "torch.mul(w_void, loss_neg_pix) loss_pos = torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix) final_loss =", "elif self.batch_average: final_loss /= label.size()[0] return final_loss class BinaryCrossEntropyLoss(Module): \"\"\"", "= loss_pos + loss_neg if self.size_average: final_loss /= float(np.prod(label.size())) elif", "is HED-style if self.pos_weight is None: num_labels_pos = torch.sum(labels) num_labels_neg", "loss_val) if void_pixels is not None and not self.pos_weight: w_void", "torch.ge(label, 0.5).float() output_gt_zero = torch.ge(output, 0).float() loss_val = torch.mul(output, (labels", "used. \"\"\" def __init__(self, loss='l1'): super(DepthLoss, self).__init__() if loss ==", "self.size_average = size_average self.batch_average = batch_average self.pos_weight = pos_weight def", "= w * loss_pos + (1 - w) * loss_neg", "max(n_valid, 1e-6)) return ret_loss else: ret_loss = torch.div(loss, float(np.prod(label.size()))) return", "self.batch_average = batch_average def forward(self, output, label, void_pixels=None): assert (output.size()", "= nn.L1Loss() else: raise NotImplementedError('Loss {} currently not supported in", "normalize: self.normalize = Normalize() else: self.normalize = None if norm", "loss_neg = torch.sum(loss_neg_pix) final_loss = loss_pos + loss_neg if self.size_average:", "0, :, :].long() loss = self.criterion(self.softmax(out), label) return loss class", "self.normalize = None if norm == 1: print('Using L1 loss", "print('Using L2 loss for surface normals') self.loss_func = F.mse_loss else:", "size_average=True, normalize=False, norm=1): super(NormalsLoss, self).__init__() self.size_average = size_average if normalize:", "is not None: out_norm = self.normalize(out) loss = self.loss_func(torch.masked_select(out_norm, mask),", "Cross Entropy Loss with optional ignore regions \"\"\" def __init__(self,", "= F.l1_loss elif norm == 2: print('Using L2 loss for", "batch_size x channels x h x w # label shape", "self.loss_func = F.l1_loss elif norm == 2: print('Using L2 loss", "{} currently not supported in DepthLoss'.format(loss)) def forward(self, out, label):", "nn.NLLLoss(ignore_index=255) def forward(self, out, label): assert not label.requires_grad # out", "else: loss = self.loss_func(torch.masked_select(out, mask), torch.masked_select(label, mask), reduction='sum') if self.size_average:", "mask), reduction='sum') else: loss = self.loss_func(torch.masked_select(out, mask), torch.masked_select(label, mask), reduction='sum')", "loss_val) loss_neg_pix = -torch.mul(1.0 - labels, loss_val) if void_pixels is", "is referenced from # https://github.com/facebookresearch/astmt/ # # Copyright (c) Facebook,", "ignore_label=255): assert not label.requires_grad mask = (label != ignore_label) n_valid", "(labels - output_gt_zero)) - torch.log( 1 + torch.exp(output - 2", "- torch.log( 1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))", "self).__init__() self.size_average = size_average 
self.batch_average = batch_average self.pos_weight = pos_weight", "\"\"\" def __init__(self, loss='l1'): super(DepthLoss, self).__init__() if loss == 'l1':", "== label.size()) labels = torch.ge(label, 0.5).float() # Weighting of the", "-torch.mul(labels, loss_val) loss_neg_pix = -torch.mul(1.0 - labels, loss_val) if void_pixels", "= Normalize() else: self.normalize = None if norm == 1:", "self.size_average: if ignore_label: ret_loss = torch.div(loss, max(n_valid, 1e-6)) return ret_loss", "torch.sum(mask).item() if self.normalize is not None: out_norm = self.normalize(out) loss", "norm=1): super(NormalsLoss, self).__init__() self.size_average = size_average if normalize: self.normalize =", "def forward(self, out, label): assert not label.requires_grad # out shape", "1: print('Using L1 loss for surface normals') self.loss_func = F.l1_loss", "self.loss_func = F.mse_loss else: raise NotImplementedError def forward(self, out, label,", "# # Copyright (c) Facebook, Inc. and its affiliates. #", "0.5).float() loss_pos_pix = torch.mul(w_void, loss_pos_pix) loss_neg_pix = torch.mul(w_void, loss_neg_pix) loss_pos", "elif norm == 2: print('Using L2 loss for surface normals')", "= self.normalize(out) loss = self.loss_func(torch.masked_select(out_norm, mask), torch.masked_select(label, mask), reduction='sum') else:", "= torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) + 1e-12 top = bottom.div(qn) return", "reserved. # # License: Attribution-NonCommercial 4.0 International import torch import", "torch.sum(loss_neg_pix) final_loss = w * loss_pos + (1 - w)", "DepthLoss(nn.Module): \"\"\" Loss for depth prediction. By default L1 loss", "- labels, loss_val) if void_pixels is not None: w_void =", "if normalize: self.normalize = Normalize() else: self.normalize = None if", "= torch.sum(loss_neg_pix) final_loss = loss_pos + loss_neg if self.size_average: final_loss", "\"\"\" def __init__(self, size_average=True, normalize=False, norm=1): super(NormalsLoss, self).__init__() self.size_average =", "F.l1_loss elif norm == 2: print('Using L2 loss for surface", "as F from torch.nn.modules.module import Module import numpy as np", "= torch.mul(w_void, loss_pos_pix) loss_neg_pix = torch.mul(w_void, loss_neg_pix) num_total = num_total", "self).__init__() self.size_average = size_average if normalize: self.normalize = Normalize() else:", "NotImplementedError def forward(self, out, label, ignore_label=255): assert not label.requires_grad mask", "* loss_neg if self.size_average: final_loss /= float(np.prod(label.size())) elif self.batch_average: final_loss", "labels, loss_val) if void_pixels is not None: w_void = torch.le(void_pixels,", "torch.nn.modules.module import Module import numpy as np class SoftMaxwithLoss(Module): \"\"\"", "= label[:, 0, :, :].long() loss = self.criterion(self.softmax(out), label) return", "torch.sum(loss_neg_pix) final_loss = loss_pos + loss_neg if self.size_average: final_loss /=", "num_total - torch.ge(void_pixels, 0.5).float().sum() w = num_labels_neg / num_total loss_pos", "torch.sum(1.0 - labels) num_total = num_labels_pos + num_labels_neg w =", "import numpy as np class SoftMaxwithLoss(Module): \"\"\" This function returns", "final_loss = loss_pos + loss_neg if self.size_average: final_loss /= float(np.prod(label.size()))", "= torch.le(void_pixels, 0.5).float() loss_pos_pix = torch.mul(w_void, loss_pos_pix) loss_neg_pix = torch.mul(w_void,", "assert not label.requires_grad mask = (label != ignore_label) n_valid =", "normals') self.loss_func = F.l1_loss elif norm == 2: print('Using L2", "(c) 
Facebook, Inc. and its affiliates. # All rights reserved.", "raise NotImplementedError def forward(self, out, label, ignore_label=255): assert not label.requires_grad", "not balanced. \"\"\" def __init__(self, size_average=True, batch_average=True): super(BinaryCrossEntropyLoss, self).__init__() self.size_average", "num_labels_neg / num_total loss_pos = torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix) final_loss", "SoftMaxwithLoss(Module): \"\"\" This function returns cross entropy loss for semantic", "def __init__(self, size_average=True, batch_average=True, pos_weight=None): super(BalancedCrossEntropyLoss, self).__init__() self.size_average = size_average", "self).__init__() self.softmax = nn.LogSoftmax(dim=1) self.criterion = nn.NLLLoss(ignore_index=255) def forward(self, out,", "float(np.prod(label.size())) elif self.batch_average: final_loss /= label.size()[0] return final_loss class DepthLoss(nn.Module):", "L2 loss for surface normals') self.loss_func = F.mse_loss else: raise", "Loss with optional ignore regions \"\"\" def __init__(self, size_average=True, batch_average=True,", "= torch.sum(loss_neg_pix) final_loss = w * loss_pos + (1 -", "self.batch_average = batch_average self.pos_weight = pos_weight def forward(self, output, label,", "is None: num_labels_pos = torch.sum(labels) num_labels_neg = torch.sum(1.0 - labels)", "def __init__(self, size_average=True, normalize=False, norm=1): super(NormalsLoss, self).__init__() self.size_average = size_average", "# Copyright (c) Facebook, Inc. and its affiliates. # All", "Module import numpy as np class SoftMaxwithLoss(Module): \"\"\" This function", "torch.mul(output, (labels - output_gt_zero)) - torch.log( 1 + torch.exp(output -", "# All rights reserved. # # License: Attribution-NonCommercial 4.0 International", "= torch.div(loss, max(n_valid, 1e-6)) return ret_loss else: ret_loss = torch.div(loss,", "assert (output.size() == label.size()) labels = torch.ge(label, 0.5).float() # Weighting", "4.0 International import torch import torch.nn as nn import torch.nn.functional", "+ loss_neg if self.size_average: final_loss /= float(np.prod(label.size())) elif self.batch_average: final_loss", "None: num_labels_pos = torch.sum(labels) num_labels_neg = torch.sum(1.0 - labels) num_total", "None if norm == 1: print('Using L1 loss for surface", "label = label[:, 0, :, :].long() loss = self.criterion(self.softmax(out), label)", "def __init__(self, size_average=True, batch_average=True): super(BinaryCrossEntropyLoss, self).__init__() self.size_average = size_average self.batch_average", "= self.loss_func(torch.masked_select(out_norm, mask), torch.masked_select(label, mask), reduction='sum') else: loss = self.loss_func(torch.masked_select(out,", "pos_weight=None): super(BalancedCrossEntropyLoss, self).__init__() self.size_average = size_average self.batch_average = batch_average self.pos_weight", "/ num_total else: w = self.pos_weight output_gt_zero = torch.ge(output, 0).float()", "self.loss(torch.masked_select(out, mask), torch.masked_select(label, mask)) class Normalize(nn.Module): def __init__(self): super(Normalize, self).__init__()", "None: w_void = torch.le(void_pixels, 0.5).float() loss_pos_pix = torch.mul(w_void, loss_pos_pix) loss_neg_pix", "labels = torch.ge(label, 0.5).float() # Weighting of the loss, default", "forward(self, out, label, ignore_label=255): assert not label.requires_grad mask = (label", "affiliates. # All rights reserved. 
# # License: Attribution-NonCommercial 4.0", "bottom.div(qn) return top class NormalsLoss(Module): \"\"\" L1 loss with ignore", "w = self.pos_weight output_gt_zero = torch.ge(output, 0).float() loss_val = torch.mul(output,", "loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log( 1 +", "def __init__(self): super(SoftMaxwithLoss, self).__init__() self.softmax = nn.LogSoftmax(dim=1) self.criterion = nn.NLLLoss(ignore_index=255)", "norm == 2: print('Using L2 loss for surface normals') self.loss_func", "self.normalize is not None: out_norm = self.normalize(out) loss = self.loss_func(torch.masked_select(out_norm,", "loss = self.loss_func(torch.masked_select(out, mask), torch.masked_select(label, mask), reduction='sum') if self.size_average: if", "self.size_average: final_loss /= float(np.prod(label.size())) elif self.batch_average: final_loss /= label.size()[0] return", "output_gt_zero = torch.ge(output, 0).float() loss_val = torch.mul(output, (labels - output_gt_zero))", "loss_pos_pix) loss_neg_pix = torch.mul(w_void, loss_neg_pix) num_total = num_total - torch.ge(void_pixels,", "pos_weight def forward(self, output, label, void_pixels=None): assert (output.size() == label.size())", "output_gt_zero))) loss_pos_pix = -torch.mul(labels, loss_val) loss_neg_pix = -torch.mul(1.0 - labels,", "# label shape batch_size x 1 x h x w", "assert (output.size() == label.size()) labels = torch.ge(label, 0.5).float() output_gt_zero =", "\"\"\" def __init__(self): super(SoftMaxwithLoss, self).__init__() self.softmax = nn.LogSoftmax(dim=1) self.criterion =", "and not self.pos_weight: w_void = torch.le(void_pixels, 0.5).float() loss_pos_pix = torch.mul(w_void,", "of the loss, default is HED-style if self.pos_weight is None:", "labels) num_total = num_labels_pos + num_labels_neg w = num_labels_neg /", "void_pixels is not None: w_void = torch.le(void_pixels, 0.5).float() loss_pos_pix =", "assert not label.requires_grad # out shape batch_size x channels x", "num_labels_neg / num_total else: w = self.pos_weight output_gt_zero = torch.ge(output,", "surface normals') self.loss_func = F.l1_loss elif norm == 2: print('Using", "labels = torch.ge(label, 0.5).float() output_gt_zero = torch.ge(output, 0).float() loss_val =", "F from torch.nn.modules.module import Module import numpy as np class", "# out shape batch_size x channels x h x w", "L1 loss for surface normals') self.loss_func = F.l1_loss elif norm", "- labels) num_total = num_labels_pos + num_labels_neg w = num_labels_neg", "This code is referenced from # https://github.com/facebookresearch/astmt/ # # Copyright", "final_loss class DepthLoss(nn.Module): \"\"\" Loss for depth prediction. By default", "-torch.mul(1.0 - labels, loss_val) if void_pixels is not None and", "normals \"\"\" def __init__(self, size_average=True, normalize=False, norm=1): super(NormalsLoss, self).__init__() self.size_average", "loss_neg_pix = torch.mul(w_void, loss_neg_pix) loss_pos = torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix)", "torch.masked_select(label, mask)) class Normalize(nn.Module): def __init__(self): super(Normalize, self).__init__() def forward(self,", "International import torch import torch.nn as nn import torch.nn.functional as", "class Normalize(nn.Module): def __init__(self): super(Normalize, self).__init__() def forward(self, bottom): qn", "def __init__(self, loss='l1'): super(DepthLoss, self).__init__() if loss == 'l1': self.loss", "class DepthLoss(nn.Module): \"\"\" Loss for depth prediction. 
By default L1", "self).__init__() def forward(self, bottom): qn = torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) +", "super(BalancedCrossEntropyLoss, self).__init__() self.size_average = size_average self.batch_average = batch_average self.pos_weight =", "torch.sum(labels) num_labels_neg = torch.sum(1.0 - labels) num_total = num_labels_pos +", "mask), torch.masked_select(label, mask), reduction='sum') if self.size_average: if ignore_label: ret_loss =", "regions \"\"\" def __init__(self, size_average=True, batch_average=True, pos_weight=None): super(BalancedCrossEntropyLoss, self).__init__() self.size_average", "class BalancedCrossEntropyLoss(Module): \"\"\" Balanced Cross Entropy Loss with optional ignore", "+ 1e-12 top = bottom.div(qn) return top class NormalsLoss(Module): \"\"\"", "not label.requires_grad mask = (label != ignore_label) n_valid = torch.sum(mask).item()", "forward(self, out, label): assert not label.requires_grad # out shape batch_size", "2: print('Using L2 loss for surface normals') self.loss_func = F.mse_loss", "w = num_labels_neg / num_total else: w = self.pos_weight output_gt_zero", "import torch.nn.functional as F from torch.nn.modules.module import Module import numpy", "super(DepthLoss, self).__init__() if loss == 'l1': self.loss = nn.L1Loss() else:", "def __init__(self): super(Normalize, self).__init__() def forward(self, bottom): qn = torch.norm(bottom,", "import Module import numpy as np class SoftMaxwithLoss(Module): \"\"\" This", "Weighting of the loss, default is HED-style if self.pos_weight is", "Attribution-NonCommercial 4.0 International import torch import torch.nn as nn import", "super(BinaryCrossEntropyLoss, self).__init__() self.size_average = size_average self.batch_average = batch_average def forward(self,", "num_labels_neg = torch.sum(1.0 - labels) num_total = num_labels_pos + num_labels_neg", "= torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix) final_loss = w * loss_pos", "balanced. \"\"\" def __init__(self, size_average=True, batch_average=True): super(BinaryCrossEntropyLoss, self).__init__() self.size_average =", "normalize=False, norm=1): super(NormalsLoss, self).__init__() self.size_average = size_average if normalize: self.normalize", "1e-6)) return ret_loss else: ret_loss = torch.div(loss, float(np.prod(label.size()))) return ret_loss", "semantic segmentation \"\"\" def __init__(self): super(SoftMaxwithLoss, self).__init__() self.softmax = nn.LogSoftmax(dim=1)", "Binary Cross Entropy with ignore regions, not balanced. \"\"\" def", "F.mse_loss else: raise NotImplementedError def forward(self, out, label, ignore_label=255): assert", "\"\"\" Binary Cross Entropy with ignore regions, not balanced. \"\"\"", "entropy loss for semantic segmentation \"\"\" def __init__(self): super(SoftMaxwithLoss, self).__init__()", "(output.size() == label.size()) labels = torch.ge(label, 0.5).float() output_gt_zero = torch.ge(output,", "batch_average=True): super(BinaryCrossEntropyLoss, self).__init__() self.size_average = size_average self.batch_average = batch_average def", "final_loss /= float(np.prod(label.size())) elif self.batch_average: final_loss /= label.size()[0] return final_loss", "label.size()) labels = torch.ge(label, 0.5).float() # Weighting of the loss,", "Facebook, Inc. and its affiliates. # All rights reserved. 
#", "1 x h x w label = label[:, 0, :,", "\"\"\" Balanced Cross Entropy Loss with optional ignore regions \"\"\"", "np class SoftMaxwithLoss(Module): \"\"\" This function returns cross entropy loss", "= torch.ge(output, 0).float() loss_val = torch.mul(output, (labels - output_gt_zero)) -", "out shape batch_size x channels x h x w #", "else: raise NotImplementedError('Loss {} currently not supported in DepthLoss'.format(loss)) def", "torch.ge(output, 0).float() loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(", "__init__(self): super(SoftMaxwithLoss, self).__init__() self.softmax = nn.LogSoftmax(dim=1) self.criterion = nn.NLLLoss(ignore_index=255) def", "__init__(self, size_average=True, normalize=False, norm=1): super(NormalsLoss, self).__init__() self.size_average = size_average if", "for surface normals \"\"\" def __init__(self, size_average=True, normalize=False, norm=1): super(NormalsLoss,", "forward(self, out, label): mask = (label != 255) return self.loss(torch.masked_select(out,", "== 1: print('Using L1 loss for surface normals') self.loss_func =", "elif self.batch_average: final_loss /= label.size()[0] return final_loss class DepthLoss(nn.Module): \"\"\"", "\"\"\" L1 loss with ignore labels normalize: normalization for surface", "torch.mul(w_void, loss_pos_pix) loss_neg_pix = torch.mul(w_void, loss_neg_pix) loss_pos = torch.sum(loss_pos_pix) loss_neg", "reduction='sum') if self.size_average: if ignore_label: ret_loss = torch.div(loss, max(n_valid, 1e-6))", "Inc. and its affiliates. # All rights reserved. # #", "label, ignore_label=255): assert not label.requires_grad mask = (label != ignore_label)", "(1 - w) * loss_neg if self.size_average: final_loss /= float(np.prod(label.size()))", "size_average self.batch_average = batch_average def forward(self, output, label, void_pixels=None): assert", ":, :].long() loss = self.criterion(self.softmax(out), label) return loss class BalancedCrossEntropyLoss(Module):", "self.pos_weight output_gt_zero = torch.ge(output, 0).float() loss_val = torch.mul(output, (labels -", "label.size()[0] return final_loss class DepthLoss(nn.Module): \"\"\" Loss for depth prediction.", "mask), torch.masked_select(label, mask)) class Normalize(nn.Module): def __init__(self): super(Normalize, self).__init__() def", "- w) * loss_neg if self.size_average: final_loss /= float(np.prod(label.size())) elif", "p=2, dim=1).unsqueeze(dim=1) + 1e-12 top = bottom.div(qn) return top class", "num_labels_neg w = num_labels_neg / num_total else: w = self.pos_weight", "print('Using L1 loss for surface normals') self.loss_func = F.l1_loss elif", "w = num_labels_neg / num_total loss_pos = torch.sum(loss_pos_pix) loss_neg =", "norm == 1: print('Using L1 loss for surface normals') self.loss_func", "loss_val) if void_pixels is not None: w_void = torch.le(void_pixels, 0.5).float()", "normals') self.loss_func = F.mse_loss else: raise NotImplementedError def forward(self, out,", "as nn import torch.nn.functional as F from torch.nn.modules.module import Module", "surface normals \"\"\" def __init__(self, size_average=True, normalize=False, norm=1): super(NormalsLoss, self).__init__()", "top = bottom.div(qn) return top class NormalsLoss(Module): \"\"\" L1 loss", "self.size_average = size_average if normalize: self.normalize = Normalize() else: self.normalize", "out_norm = self.normalize(out) loss = self.loss_func(torch.masked_select(out_norm, mask), torch.masked_select(label, mask), reduction='sum')", "ignore regions, not balanced. 
\"\"\" def __init__(self, size_average=True, batch_average=True): super(BinaryCrossEntropyLoss,", "torch.masked_select(label, mask), reduction='sum') else: loss = self.loss_func(torch.masked_select(out, mask), torch.masked_select(label, mask),", "x h x w label = label[:, 0, :, :].long()", "return ret_loss else: ret_loss = torch.div(loss, float(np.prod(label.size()))) return ret_loss return", "= torch.mul(w_void, loss_neg_pix) num_total = num_total - torch.ge(void_pixels, 0.5).float().sum() w", "# https://github.com/facebookresearch/astmt/ # # Copyright (c) Facebook, Inc. and its", "self.pos_weight is None: num_labels_pos = torch.sum(labels) num_labels_neg = torch.sum(1.0 -", "final_loss = w * loss_pos + (1 - w) *", "/= label.size()[0] return final_loss class DepthLoss(nn.Module): \"\"\" Loss for depth", "L1 loss with ignore labels normalize: normalization for surface normals", "prediction. By default L1 loss is used. \"\"\" def __init__(self,", "num_labels_pos = torch.sum(labels) num_labels_neg = torch.sum(1.0 - labels) num_total =", "torch import torch.nn as nn import torch.nn.functional as F from", "self.size_average = size_average self.batch_average = batch_average def forward(self, output, label,", "not supported in DepthLoss'.format(loss)) def forward(self, out, label): mask =", "= F.mse_loss else: raise NotImplementedError def forward(self, out, label, ignore_label=255):", "= nn.LogSoftmax(dim=1) self.criterion = nn.NLLLoss(ignore_index=255) def forward(self, out, label): assert", "label.requires_grad # out shape batch_size x channels x h x", "= torch.sum(1.0 - labels) num_total = num_labels_pos + num_labels_neg w", "0).float() loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log( 1", "None and not self.pos_weight: w_void = torch.le(void_pixels, 0.5).float() loss_pos_pix =", "loss_neg_pix = torch.mul(w_void, loss_neg_pix) num_total = num_total - torch.ge(void_pixels, 0.5).float().sum()", "/ num_total loss_pos = torch.sum(loss_pos_pix) loss_neg = torch.sum(loss_neg_pix) final_loss =", "0.5).float() loss_pos_pix = torch.mul(w_void, loss_pos_pix) loss_neg_pix = torch.mul(w_void, loss_neg_pix) num_total", "is used. \"\"\" def __init__(self, loss='l1'): super(DepthLoss, self).__init__() if loss", "nn.LogSoftmax(dim=1) self.criterion = nn.NLLLoss(ignore_index=255) def forward(self, out, label): assert not", "self.criterion = nn.NLLLoss(ignore_index=255) def forward(self, out, label): assert not label.requires_grad", "loss_pos + (1 - w) * loss_neg if self.size_average: final_loss", "2 * torch.mul(output, output_gt_zero))) loss_pos_pix = -torch.mul(labels, loss_val) loss_neg_pix =", "-torch.mul(1.0 - labels, loss_val) if void_pixels is not None: w_void", "L1 loss is used. 
\"\"\" def __init__(self, loss='l1'): super(DepthLoss, self).__init__()", "= torch.sum(mask).item() if self.normalize is not None: out_norm = self.normalize(out)", "Balanced Cross Entropy Loss with optional ignore regions \"\"\" def", "mask), torch.masked_select(label, mask), reduction='sum') else: loss = self.loss_func(torch.masked_select(out, mask), torch.masked_select(label,", "else: self.normalize = None if norm == 1: print('Using L1", "255) return self.loss(torch.masked_select(out, mask), torch.masked_select(label, mask)) class Normalize(nn.Module): def __init__(self):", "mask), reduction='sum') if self.size_average: if ignore_label: ret_loss = torch.div(loss, max(n_valid,", "mask)) class Normalize(nn.Module): def __init__(self): super(Normalize, self).__init__() def forward(self, bottom):", "num_total = num_total - torch.ge(void_pixels, 0.5).float().sum() w = num_labels_neg /", "h x w label = label[:, 0, :, :].long() loss", "torch.ge(label, 0.5).float() # Weighting of the loss, default is HED-style", "/= label.size()[0] return final_loss class BinaryCrossEntropyLoss(Module): \"\"\" Binary Cross Entropy", "\"\"\" This function returns cross entropy loss for semantic segmentation", "label): mask = (label != 255) return self.loss(torch.masked_select(out, mask), torch.masked_select(label,", "Copyright (c) Facebook, Inc. and its affiliates. # All rights", "\"\"\" def __init__(self, size_average=True, batch_average=True, pos_weight=None): super(BalancedCrossEntropyLoss, self).__init__() self.size_average =", "= batch_average self.pos_weight = pos_weight def forward(self, output, label, void_pixels=None):", "mask = (label != ignore_label) n_valid = torch.sum(mask).item() if self.normalize", "shape batch_size x 1 x h x w label =", "if self.size_average: if ignore_label: ret_loss = torch.div(loss, max(n_valid, 1e-6)) return", "ret_loss = torch.div(loss, max(n_valid, 1e-6)) return ret_loss else: ret_loss =", "!= 255) return self.loss(torch.masked_select(out, mask), torch.masked_select(label, mask)) class Normalize(nn.Module): def", "w * loss_pos + (1 - w) * loss_neg if", "return top class NormalsLoss(Module): \"\"\" L1 loss with ignore labels", "Cross Entropy with ignore regions, not balanced. 
\"\"\" def __init__(self,", "else: w = self.pos_weight output_gt_zero = torch.ge(output, 0).float() loss_val =", "surface normals') self.loss_func = F.mse_loss else: raise NotImplementedError def forward(self,", "(label != 255) return self.loss(torch.masked_select(out, mask), torch.masked_select(label, mask)) class Normalize(nn.Module):", "for semantic segmentation \"\"\" def __init__(self): super(SoftMaxwithLoss, self).__init__() self.softmax =", "final_loss class BinaryCrossEntropyLoss(Module): \"\"\" Binary Cross Entropy with ignore regions,", "loss_pos_pix = torch.mul(w_void, loss_pos_pix) loss_neg_pix = torch.mul(w_void, loss_neg_pix) num_total =", "Normalize() else: self.normalize = None if norm == 1: print('Using", "final_loss /= label.size()[0] return final_loss class BinaryCrossEntropyLoss(Module): \"\"\" Binary Cross", "__init__(self, loss='l1'): super(DepthLoss, self).__init__() if loss == 'l1': self.loss =", "self.batch_average: final_loss /= label.size()[0] return final_loss class BinaryCrossEntropyLoss(Module): \"\"\" Binary", "torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) + 1e-12 top = bottom.div(qn) return top", "num_labels_pos + num_labels_neg w = num_labels_neg / num_total else: w", "as np class SoftMaxwithLoss(Module): \"\"\" This function returns cross entropy", "batch_size x 1 x h x w label = label[:,", "size_average=True, batch_average=True): super(BinaryCrossEntropyLoss, self).__init__() self.size_average = size_average self.batch_average = batch_average", "the loss, default is HED-style if self.pos_weight is None: num_labels_pos", "class SoftMaxwithLoss(Module): \"\"\" This function returns cross entropy loss for", "channels x h x w # label shape batch_size x", "if void_pixels is not None and not self.pos_weight: w_void =" ]
[ "* (z_k[i+1]-2*z_k[i]+z_k[i-1]) z = np.asarray(z) x, y = np.meshgrid(x, y)", "= plt.figure() ax = fig.gca(projection='3d') surf = ax.plot_surface(x, y, z,", "in range(PASSOS+1): z_k = np.copy(z_temp) z.append(z_k) for i in range(1,", "np import os import contorno from constantes import INTERVALOS, PASSOS,", "DELTA_X z_temp = contorno.p_3 TAMANHO_BARRA = 2 x = np.linspace(0.0,", "INTERVALOS+1) y = np.linspace(0.0, DELTA_T, PASSOS+1) z = [] for", "z_temp = contorno.p_3 TAMANHO_BARRA = 2 x = np.linspace(0.0, TAMANHO_BARRA,", "= fig.gca(projection='3d') surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, antialiased=False) ax.set_xlabel('x')", "range(1, INTERVALOS): z_temp[i] = z_k[i] + (DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1]) z", "cm import numpy as np import os import contorno from", "PASSOS+1) z = [] for k in range(PASSOS+1): z_k =", "= contorno.p_3 TAMANHO_BARRA = 2 x = np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1)", "TAMANHO_BARRA = 2 x = np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1) y =", "= 2 x = np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1) y = np.linspace(0.0,", "y) fig = plt.figure() ax = fig.gca(projection='3d') surf = ax.plot_surface(x,", "contorno from constantes import INTERVALOS, PASSOS, TAMANHO_BARRA, DELTA_T, DELTA_X z_temp", "from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib", "os import contorno from constantes import INTERVALOS, PASSOS, TAMANHO_BARRA, DELTA_T,", "TAMANHO_BARRA, DELTA_T, DELTA_X z_temp = contorno.p_3 TAMANHO_BARRA = 2 x", "= np.copy(z_temp) z.append(z_k) for i in range(1, INTERVALOS): z_temp[i] =", "mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib import", "contorno.p_3 TAMANHO_BARRA = 2 x = np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1) y", "import contorno from constantes import INTERVALOS, PASSOS, TAMANHO_BARRA, DELTA_T, DELTA_X", "import Axes3D import matplotlib.pyplot as plt from matplotlib import cm", "range(PASSOS+1): z_k = np.copy(z_temp) z.append(z_k) for i in range(1, INTERVALOS):", "ax = fig.gca(projection='3d') surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, antialiased=False)", "as np import os import contorno from constantes import INTERVALOS,", "fig = plt.figure() ax = fig.gca(projection='3d') surf = ax.plot_surface(x, y,", "(z_k[i+1]-2*z_k[i]+z_k[i-1]) z = np.asarray(z) x, y = np.meshgrid(x, y) fig", "y = np.meshgrid(x, y) fig = plt.figure() ax = fig.gca(projection='3d')", "Axes3D import matplotlib.pyplot as plt from matplotlib import cm import", "[] for k in range(PASSOS+1): z_k = np.copy(z_temp) z.append(z_k) for", "for k in range(PASSOS+1): z_k = np.copy(z_temp) z.append(z_k) for i", "z, cmap=cm.coolwarm, antialiased=False) ax.set_xlabel('x') ax.set_ylabel('t') ax.set_zlabel('T(x,t)') fig.colorbar(surf, shrink=0.5, aspect=5) plt.show()", "k in range(PASSOS+1): z_k = np.copy(z_temp) z.append(z_k) for i in", "np.linspace(0.0, DELTA_T, PASSOS+1) z = [] for k in range(PASSOS+1):", "z = [] for k in range(PASSOS+1): z_k = np.copy(z_temp)", "as plt from matplotlib import cm import numpy as np", "y = np.linspace(0.0, DELTA_T, PASSOS+1) z = [] for k", "+ (DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1]) z = np.asarray(z) x, y =", "import INTERVALOS, PASSOS, TAMANHO_BARRA, DELTA_T, DELTA_X z_temp = contorno.p_3 TAMANHO_BARRA", "np.meshgrid(x, y) fig = plt.figure() ax = fig.gca(projection='3d') surf =", "x = np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1) y = np.linspace(0.0, DELTA_T, PASSOS+1)", "from constantes import INTERVALOS, PASSOS, TAMANHO_BARRA, DELTA_T, 
DELTA_X z_temp =", "z_k[i] + (DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1]) z = np.asarray(z) x, y", "TAMANHO_BARRA, INTERVALOS+1) y = np.linspace(0.0, DELTA_T, PASSOS+1) z = []", "PASSOS, TAMANHO_BARRA, DELTA_T, DELTA_X z_temp = contorno.p_3 TAMANHO_BARRA = 2", "z.append(z_k) for i in range(1, INTERVALOS): z_temp[i] = z_k[i] +", "INTERVALOS, PASSOS, TAMANHO_BARRA, DELTA_T, DELTA_X z_temp = contorno.p_3 TAMANHO_BARRA =", "= np.meshgrid(x, y) fig = plt.figure() ax = fig.gca(projection='3d') surf", "= [] for k in range(PASSOS+1): z_k = np.copy(z_temp) z.append(z_k)", "(DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1]) z = np.asarray(z) x, y = np.meshgrid(x,", "plt.figure() ax = fig.gca(projection='3d') surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm,", "in range(1, INTERVALOS): z_temp[i] = z_k[i] + (DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1])", "= np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1) y = np.linspace(0.0, DELTA_T, PASSOS+1) z", "z_k = np.copy(z_temp) z.append(z_k) for i in range(1, INTERVALOS): z_temp[i]", "import os import contorno from constantes import INTERVALOS, PASSOS, TAMANHO_BARRA,", "surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, antialiased=False) ax.set_xlabel('x') ax.set_ylabel('t') ax.set_zlabel('T(x,t)')", "plt from matplotlib import cm import numpy as np import", "DELTA_T, PASSOS+1) z = [] for k in range(PASSOS+1): z_k", "import numpy as np import os import contorno from constantes", "numpy as np import os import contorno from constantes import", "fig.gca(projection='3d') surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, antialiased=False) ax.set_xlabel('x') ax.set_ylabel('t')", "ax.plot_surface(x, y, z, cmap=cm.coolwarm, antialiased=False) ax.set_xlabel('x') ax.set_ylabel('t') ax.set_zlabel('T(x,t)') fig.colorbar(surf, shrink=0.5,", "matplotlib.pyplot as plt from matplotlib import cm import numpy as", "from matplotlib import cm import numpy as np import os", "INTERVALOS): z_temp[i] = z_k[i] + (DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1]) z =", "2 x = np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1) y = np.linspace(0.0, DELTA_T,", "import matplotlib.pyplot as plt from matplotlib import cm import numpy", "y, z, cmap=cm.coolwarm, antialiased=False) ax.set_xlabel('x') ax.set_ylabel('t') ax.set_zlabel('T(x,t)') fig.colorbar(surf, shrink=0.5, aspect=5)", "constantes import INTERVALOS, PASSOS, TAMANHO_BARRA, DELTA_T, DELTA_X z_temp = contorno.p_3", "= z_k[i] + (DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1]) z = np.asarray(z) x,", "DELTA_T, DELTA_X z_temp = contorno.p_3 TAMANHO_BARRA = 2 x =", "= np.linspace(0.0, DELTA_T, PASSOS+1) z = [] for k in", "z_temp[i] = z_k[i] + (DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1]) z = np.asarray(z)", "matplotlib import cm import numpy as np import os import", "= ax.plot_surface(x, y, z, cmap=cm.coolwarm, antialiased=False) ax.set_xlabel('x') ax.set_ylabel('t') ax.set_zlabel('T(x,t)') fig.colorbar(surf,", "np.copy(z_temp) z.append(z_k) for i in range(1, INTERVALOS): z_temp[i] = z_k[i]", "np.asarray(z) x, y = np.meshgrid(x, y) fig = plt.figure() ax", "= np.asarray(z) x, y = np.meshgrid(x, y) fig = plt.figure()", "i in range(1, INTERVALOS): z_temp[i] = z_k[i] + (DELTA_T/(DELTA_X**2)) *", "np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1) y = np.linspace(0.0, DELTA_T, PASSOS+1) z =", "for i in range(1, INTERVALOS): z_temp[i] = z_k[i] + (DELTA_T/(DELTA_X**2))", "z = np.asarray(z) x, y = np.meshgrid(x, y) fig =", "import cm import numpy as np import os import contorno", "x, y = np.meshgrid(x, y) fig = 
plt.figure() ax =" ]
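# The script above imports two local modules (constantes, contorno) that are not
# included in this row. Below is a minimal sketch of what they could provide; the
# imported names are taken from the script, but the concrete values and the
# initial profile are assumptions. The explicit finite-difference update used
# above is only stable when DELTA_T / DELTA_X**2 <= 0.5, so the sketch picks
# values that satisfy that bound.

# constantes.py (hypothetical)
TAMANHO_BARRA = 2.0                  # bar length
INTERVALOS = 20                      # number of spatial intervals
PASSOS = 500                         # number of time steps
DELTA_X = TAMANHO_BARRA / INTERVALOS
DELTA_T = 0.4 * DELTA_X ** 2         # DELTA_T / DELTA_X**2 = 0.4 <= 0.5

# contorno.py (hypothetical): initial profile p_3 with both ends of the bar held at zero
import numpy as np
from constantes import INTERVALOS

p_3 = np.full(INTERVALOS + 1, 100.0)
p_3[0] = 0.0
p_3[-1] = 0.0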
[ "numRows - 1: step = -1 index += step return", "Solution: def convert(self, s: str, numRows: int) -> str: #", "return s L = [''] * numRows index, step =", "string as it is if numRows < 2: return s", "+ row1 +.. numRows row = 0 result = [\"\"]*numRows", "index == 0: step = 1 elif index == numRows", "row-1 return \"\".join(result) if __name__ == '__main__': # begin s", "# Solution 2 class Solution: def convert(self, s: str, numRows:", "row == numRows-1: move_down = False result[row] += character row", "+= character row = (row+1) if move_down else row-1 return", "row # from row = 0 to row = numRows-1,", "Finally after filling up all the four rows we join", "if move_down else row-1 return \"\".join(result) if __name__ == '__main__':", "(i.e. row = numRows-1) # then we move up. Similarly", "elif index == numRows - 1: step = -1 index", "True elif row == numRows-1: move_down = False result[row] +=", "# then we move up. Similarly if we reach top,", "s # We will create an empty string for each", "index == numRows - 1: step = -1 index +=", "all the four rows we join them row0 + row1", "elif row == numRows-1: move_down = False result[row] += character", "the string as it is if numRows < 2: return", "the four rows we join them row0 + row1 +..", "= True elif row == numRows-1: move_down = False result[row]", "== 1 or numRows >= len(s): return s L =", "and move down # Finally after filling up all the", "== numRows-1: move_down = False result[row] += character row =", "if we reach bottom (i.e. row = numRows-1) # then", "= 0 result = [\"\"]*numRows for character in s: if", "== 0: step = 1 elif index == numRows -", "is if numRows < 2: return s # We will", "step = 0, 1 for x in s: L[index] +=", "after filling up all the four rows we join them", "them row0 + row1 +.. numRows row = 0 result", "row = (row+1) if move_down else row-1 return \"\".join(result) if", "1 or numRows >= len(s): return s L = ['']", "only one row then we can return the string as", "numRows-1) # then we move up. Similarly if we reach", "bottom (i.e. row = numRows-1) # then we move up.", "# solution 1: class Solution1: def convert(self, s: str, numRows:", "s L = [''] * numRows index, step = 0,", "index, step = 0, 1 for x in s: L[index]", "for x in s: L[index] += x if index ==", "2: return s # We will create an empty string", "def convert(self, s: str, numRows: int) -> str: if numRows", "or numRows >= len(s): return s L = [''] *", "class Solution: def convert(self, s: str, numRows: int) -> str:", "str: # If we have only one row then we", "Solution 2 class Solution: def convert(self, s: str, numRows: int)", "if index == 0: step = 1 elif index ==", "numRows == 1 or numRows >= len(s): return s L", "int) -> str: if numRows == 1 or numRows >=", "down # Finally after filling up all the four rows", "str, numRows: int) -> str: if numRows == 1 or", "if __name__ == '__main__': # begin s = Solution() print(s.convert(\"PAYPALISHIRING\",", "str, numRows: int) -> str: # If we have only", "we have only one row then we can return the", "we join them row0 + row1 +.. 
numRows row =", "up all the four rows we join them row0 +", "-1 index += step return ''.join(L) # Solution 2 class", "string for each row and then fill each element in", "+= x if index == 0: step = 1 elif", "= [''] * numRows index, step = 0, 1 for", "class Solution1: def convert(self, s: str, numRows: int) -> str:", "0, 1 for x in s: L[index] += x if", "an empty string for each row and then fill each", "numRows-1: move_down = False result[row] += character row = (row+1)", "numRows: int) -> str: # If we have only one", "= -1 index += step return ''.join(L) # Solution 2", "move up. Similarly if we reach top, we change direction", "# We will create an empty string for each row", "change direction and move down # Finally after filling up", "we reach bottom (i.e. row = numRows-1) # then we", "as it is if numRows < 2: return s #", "0 to row = numRows-1, if we reach bottom (i.e.", "return \"\".join(result) if __name__ == '__main__': # begin s =", "= (row+1) if move_down else row-1 return \"\".join(result) if __name__", "reach bottom (i.e. row = numRows-1) # then we move", ">= len(s): return s L = [''] * numRows index,", "(row+1) if move_down else row-1 return \"\".join(result) if __name__ ==", "numRows < 2: return s # We will create an", "row and then fill each element in each row #", "and then fill each element in each row # from", "convert(self, s: str, numRows: int) -> str: if numRows ==", "1: class Solution1: def convert(self, s: str, numRows: int) ->", "len(s): return s L = [''] * numRows index, step", "if we reach top, we change direction and move down", "one row then we can return the string as it", "it is if numRows < 2: return s # We", "-> str: if numRows == 1 or numRows >= len(s):", "move_down = True elif row == numRows-1: move_down = False", "will create an empty string for each row and then", "from row = 0 to row = numRows-1, if we", "[''] * numRows index, step = 0, 1 for x", "\"\".join(result) if __name__ == '__main__': # begin s = Solution()", "result = [\"\"]*numRows for character in s: if row ==", "element in each row # from row = 0 to", "if row == 0: move_down = True elif row ==", "rows we join them row0 + row1 +.. numRows row", "x if index == 0: step = 1 elif index", "in s: if row == 0: move_down = True elif", "= numRows-1) # then we move up. Similarly if we", "1 for x in s: L[index] += x if index", "= [\"\"]*numRows for character in s: if row == 0:", "if numRows == 1 or numRows >= len(s): return s", "s: str, numRows: int) -> str: # If we have", "four rows we join them row0 + row1 +.. numRows", "= False result[row] += character row = (row+1) if move_down", "numRows: int) -> str: if numRows == 1 or numRows", "we can return the string as it is if numRows", "= 0 to row = numRows-1, if we reach bottom", "in s: L[index] += x if index == 0: step", "+= step return ''.join(L) # Solution 2 class Solution: def", "If we have only one row then we can return", "s: if row == 0: move_down = True elif row", "= numRows-1, if we reach bottom (i.e. row = numRows-1)", "numRows-1, if we reach bottom (i.e. row = numRows-1) #", "reach top, we change direction and move down # Finally", "solution 1: class Solution1: def convert(self, s: str, numRows: int)", "create an empty string for each row and then fill", "-> str: # If we have only one row then", "have only one row then we can return the string", "result[row] += character row = (row+1) if move_down else row-1", "''.join(L) # Solution 2 class Solution: def convert(self, s: str,", "up. 
Similarly if we reach top, we change direction and", "False result[row] += character row = (row+1) if move_down else", "move down # Finally after filling up all the four", "Similarly if we reach top, we change direction and move", "step = 1 elif index == numRows - 1: step", "= 1 elif index == numRows - 1: step =", "top, we change direction and move down # Finally after", "0: move_down = True elif row == numRows-1: move_down =", "L[index] += x if index == 0: step = 1", "row = 0 result = [\"\"]*numRows for character in s:", "for character in s: if row == 0: move_down =", "we change direction and move down # Finally after filling", "== numRows - 1: step = -1 index += step", "we move up. Similarly if we reach top, we change", "str: if numRows == 1 or numRows >= len(s): return", "# If we have only one row then we can", "row == 0: move_down = True elif row == numRows-1:", "x in s: L[index] += x if index == 0:", "We will create an empty string for each row and", "we reach top, we change direction and move down #", "return s # We will create an empty string for", "< 2: return s # We will create an empty", "in each row # from row = 0 to row", "if numRows < 2: return s # We will create", "fill each element in each row # from row =", "row then we can return the string as it is", "row0 + row1 +.. numRows row = 0 result =", "then we can return the string as it is if", "== 0: move_down = True elif row == numRows-1: move_down", "move_down else row-1 return \"\".join(result) if __name__ == '__main__': #", "for each row and then fill each element in each", "direction and move down # Finally after filling up all", "# from row = 0 to row = numRows-1, if", "L = [''] * numRows index, step = 0, 1", "numRows index, step = 0, 1 for x in s:", "1 elif index == numRows - 1: step = -1", "row1 +.. numRows row = 0 result = [\"\"]*numRows for", "convert(self, s: str, numRows: int) -> str: # If we", "s: str, numRows: int) -> str: if numRows == 1", "return the string as it is if numRows < 2:", "each row # from row = 0 to row =", "index += step return ''.join(L) # Solution 2 class Solution:", "row = numRows-1, if we reach bottom (i.e. row =", "else row-1 return \"\".join(result) if __name__ == '__main__': # begin", "numRows >= len(s): return s L = [''] * numRows", "# Finally after filling up all the four rows we", "empty string for each row and then fill each element", "each row and then fill each element in each row", "filling up all the four rows we join them row0", "row = 0 to row = numRows-1, if we reach", "s: L[index] += x if index == 0: step =", "step = -1 index += step return ''.join(L) # Solution", "- 1: step = -1 index += step return ''.join(L)", "then fill each element in each row # from row", "numRows row = 0 result = [\"\"]*numRows for character in", "Solution1: def convert(self, s: str, numRows: int) -> str: if", "0 result = [\"\"]*numRows for character in s: if row", "int) -> str: # If we have only one row", "row = numRows-1) # then we move up. Similarly if", "step return ''.join(L) # Solution 2 class Solution: def convert(self,", "1: step = -1 index += step return ''.join(L) #", "each element in each row # from row = 0", "0: step = 1 elif index == numRows - 1:", "__name__ == '__main__': # begin s = Solution() print(s.convert(\"PAYPALISHIRING\", 3))", "+.. numRows row = 0 result = [\"\"]*numRows for character", "character in s: if row == 0: move_down = True", "* numRows index, step = 0, 1 for x in", "to row = numRows-1, if we reach bottom (i.e. 
row", "[\"\"]*numRows for character in s: if row == 0: move_down", "can return the string as it is if numRows <", "join them row0 + row1 +.. numRows row = 0", "def convert(self, s: str, numRows: int) -> str: # If", "return ''.join(L) # Solution 2 class Solution: def convert(self, s:", "2 class Solution: def convert(self, s: str, numRows: int) ->", "then we move up. Similarly if we reach top, we", "character row = (row+1) if move_down else row-1 return \"\".join(result)", "move_down = False result[row] += character row = (row+1) if", "= 0, 1 for x in s: L[index] += x" ]
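# A quick sanity check (an addition, not part of the original snippet): both
# implementations should agree, and the classic example "PAYPALISHIRING" with
# 3 rows reads off as "PAHNAPLSIIGYIR".
for rows in (1, 2, 3, 4):
    a = Solution1().convert("PAYPALISHIRING", rows)
    b = Solution().convert("PAYPALISHIRING", rows)
    assert a == b, (rows, a, b)

print(Solution().convert("PAYPALISHIRING", 3))  # PAHNAPLSIIGYIR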
[ "as tf from tensorflow.keras.layers import Embedding, Dense, LSTM from tensorflow.keras.models", "words in corpus] sent_len = 20 embedded_doc = pad_sequences(onehot_repr,maxlen =", "X_train, X_test, y_train, y_test = train_test_split(X_final,y_final,test_size = 0.33,random_state = 42)", "@author: ASUS \"\"\" import pandas as pd df = pd.read_csv(r'D:\\nlp\\fake-news-data\\train.csv')", "sklearn.metrics import confusion_matrix, accuracy_score cm = confusion_matrix(y_test,y_pred) acc = accuracy_score(y_test,y_pred)", "= Sequential() model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len)) model.add(LSTM(100)) model.add(Dense(1,activation='sigmoid')) model.compile(loss='binary_crossentropy',optimizer = 'adam',metrics = ['accuracy'])", "= pd.read_csv(r'D:\\nlp\\fake-news-data\\train.csv') df = df.dropna() X = df.drop('label',axis = 1)", "= PorterStemmer() corpus = [] for i in range(len(messages)): print(i)", "utf-8 -*- \"\"\" Created on Thu Feb 11 13:42:45 2021", "True) import nltk import re from nltk.corpus import stopwords #", "# One Hot Representation messages = X.copy() messages.reset_index(inplace = True)", "messages = X.copy() messages.reset_index(inplace = True) import nltk import re", "sent_len,padding = 'pre') # Creating the model embedding_vector_features = 40", "',messages['title'][i]) review = review.lower() review = review.split() review = [ps.stem(word)", "\" \".join(review) corpus.append(review) onehot_repr = [one_hot(words,voc_size) for words in corpus]", "5000 # One Hot Representation messages = X.copy() messages.reset_index(inplace =", "for words in corpus] sent_len = 20 embedded_doc = pad_sequences(onehot_repr,maxlen", "from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.preprocessing.text", "42) model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64) y_pred = model.predict_classes(X_test) from sklearn.metrics import confusion_matrix, accuracy_score", "= model.predict_classes(X_test) from sklearn.metrics import confusion_matrix, accuracy_score cm = confusion_matrix(y_test,y_pred)", "tensorflow as tf from tensorflow.keras.layers import Embedding, Dense, LSTM from", "review if word not in stopwords.words('english')] review = \" \".join(review)", "pad_sequences from tensorflow.keras.preprocessing.text import one_hot # Vocabulary size voc_size =", "40 model = Sequential() model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len)) model.add(LSTM(100)) model.add(Dense(1,activation='sigmoid')) model.compile(loss='binary_crossentropy',optimizer = 'adam',metrics", "from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.preprocessing.text import one_hot # Vocabulary", "2021 @author: ASUS \"\"\" import pandas as pd df =", "review = \" \".join(review) corpus.append(review) onehot_repr = [one_hot(words,voc_size) for words", "Embedding, Dense, LSTM from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.sequence import", "df = df.dropna() X = df.drop('label',axis = 1) y =", "= 40 model = Sequential() model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len)) model.add(LSTM(100)) model.add(Dense(1,activation='sigmoid')) model.compile(loss='binary_crossentropy',optimizer =", "pd df = pd.read_csv(r'D:\\nlp\\fake-news-data\\train.csv') df = df.dropna() X = df.drop('label',axis", "= np.array(embedded_doc) y_final = np.array(y) from sklearn.model_selection import 
train_test_split X_train,", "model embedding_vector_features = 40 model = Sequential() model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len)) model.add(LSTM(100)) model.add(Dense(1,activation='sigmoid'))", "voc_size = 5000 # One Hot Representation messages = X.copy()", "= df.drop('label',axis = 1) y = df['label'] import tensorflow as", "= review.split() review = [ps.stem(word) for word in review if", "# -*- coding: utf-8 -*- \"\"\" Created on Thu Feb", "tensorflow.keras.layers import Embedding, Dense, LSTM from tensorflow.keras.models import Sequential from", "= X.copy() messages.reset_index(inplace = True) import nltk import re from", "model.compile(loss='binary_crossentropy',optimizer = 'adam',metrics = ['accuracy']) model.summary() import numpy as np", "# Creating the model embedding_vector_features = 40 model = Sequential()", "= 'adam',metrics = ['accuracy']) model.summary() import numpy as np X_final", "import tensorflow as tf from tensorflow.keras.layers import Embedding, Dense, LSTM", "pandas as pd df = pd.read_csv(r'D:\\nlp\\fake-news-data\\train.csv') df = df.dropna() X", "coding: utf-8 -*- \"\"\" Created on Thu Feb 11 13:42:45", "Hot Representation messages = X.copy() messages.reset_index(inplace = True) import nltk", "X_final = np.array(embedded_doc) y_final = np.array(y) from sklearn.model_selection import train_test_split", "y_final = np.array(y) from sklearn.model_selection import train_test_split X_train, X_test, y_train,", "re from nltk.corpus import stopwords # Dataset Preprocessing from nltk.stem", "import train_test_split X_train, X_test, y_train, y_test = train_test_split(X_final,y_final,test_size = 0.33,random_state", "= sent_len,padding = 'pre') # Creating the model embedding_vector_features =", "= df.dropna() X = df.drop('label',axis = 1) y = df['label']", "= train_test_split(X_final,y_final,test_size = 0.33,random_state = 42) model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64) y_pred = model.predict_classes(X_test)", "pad_sequences(onehot_repr,maxlen = sent_len,padding = 'pre') # Creating the model embedding_vector_features", "import Embedding, Dense, LSTM from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.sequence", "from tensorflow.keras.layers import Embedding, Dense, LSTM from tensorflow.keras.models import Sequential", "train_test_split(X_final,y_final,test_size = 0.33,random_state = 42) model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64) y_pred = model.predict_classes(X_test) from", "= 1) y = df['label'] import tensorflow as tf from", "in corpus] sent_len = 20 embedded_doc = pad_sequences(onehot_repr,maxlen = sent_len,padding", "model = Sequential() model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len)) model.add(LSTM(100)) model.add(Dense(1,activation='sigmoid')) model.compile(loss='binary_crossentropy',optimizer = 'adam',metrics =", "= pad_sequences(onehot_repr,maxlen = sent_len,padding = 'pre') # Creating the model", "y_pred = model.predict_classes(X_test) from sklearn.metrics import confusion_matrix, accuracy_score cm =", "[ps.stem(word) for word in review if word not in stopwords.words('english')]", "= 0.33,random_state = 42) model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64) y_pred = model.predict_classes(X_test) from sklearn.metrics", "i in range(len(messages)): print(i) review = re.sub('[^a-zA-Z]',' ',messages['title'][i]) review =", "model.add(LSTM(100)) 
model.add(Dense(1,activation='sigmoid')) model.compile(loss='binary_crossentropy',optimizer = 'adam',metrics = ['accuracy']) model.summary() import numpy", "word not in stopwords.words('english')] review = \" \".join(review) corpus.append(review) onehot_repr", "tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.preprocessing.text import one_hot # Vocabulary size", "size voc_size = 5000 # One Hot Representation messages =", "from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X_final,y_final,test_size", "df.dropna() X = df.drop('label',axis = 1) y = df['label'] import", "= \" \".join(review) corpus.append(review) onehot_repr = [one_hot(words,voc_size) for words in", "= np.array(y) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test", "from nltk.corpus import stopwords # Dataset Preprocessing from nltk.stem import", "model.add(Dense(1,activation='sigmoid')) model.compile(loss='binary_crossentropy',optimizer = 'adam',metrics = ['accuracy']) model.summary() import numpy as", "import re from nltk.corpus import stopwords # Dataset Preprocessing from", "import nltk import re from nltk.corpus import stopwords # Dataset", "= [ps.stem(word) for word in review if word not in", "-*- \"\"\" Created on Thu Feb 11 13:42:45 2021 @author:", "LSTM from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.sequence import pad_sequences from", "= True) import nltk import re from nltk.corpus import stopwords", "review = review.split() review = [ps.stem(word) for word in review", "= ['accuracy']) model.summary() import numpy as np X_final = np.array(embedded_doc)", "<reponame>pratikasarkar/nlp # -*- coding: utf-8 -*- \"\"\" Created on Thu", "PorterStemmer() corpus = [] for i in range(len(messages)): print(i) review", "nltk.stem import PorterStemmer ps = PorterStemmer() corpus = [] for", "\".join(review) corpus.append(review) onehot_repr = [one_hot(words,voc_size) for words in corpus] sent_len", "y_train, y_test = train_test_split(X_final,y_final,test_size = 0.33,random_state = 42) model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64) y_pred", "Sequential from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.preprocessing.text import one_hot #", "Dense, LSTM from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.sequence import pad_sequences", "np.array(y) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test =", "\"\"\" Created on Thu Feb 11 13:42:45 2021 @author: ASUS", "numpy as np X_final = np.array(embedded_doc) y_final = np.array(y) from", "Creating the model embedding_vector_features = 40 model = Sequential() model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len))", "tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.preprocessing.text import", "corpus] sent_len = 20 embedded_doc = pad_sequences(onehot_repr,maxlen = sent_len,padding =", "tf from tensorflow.keras.layers import Embedding, Dense, LSTM from tensorflow.keras.models import", "embedded_doc = pad_sequences(onehot_repr,maxlen = sent_len,padding = 'pre') # Creating the", "if word not in stopwords.words('english')] review = \" \".join(review) corpus.append(review)", "= re.sub('[^a-zA-Z]',' ',messages['title'][i]) review = review.lower() review = review.split() review", "= df['label'] 
import tensorflow as tf from tensorflow.keras.layers import Embedding,", "Created on Thu Feb 11 13:42:45 2021 @author: ASUS \"\"\"", "onehot_repr = [one_hot(words,voc_size) for words in corpus] sent_len = 20", "'adam',metrics = ['accuracy']) model.summary() import numpy as np X_final =", "-*- coding: utf-8 -*- \"\"\" Created on Thu Feb 11", "one_hot # Vocabulary size voc_size = 5000 # One Hot", "Representation messages = X.copy() messages.reset_index(inplace = True) import nltk import", "[] for i in range(len(messages)): print(i) review = re.sub('[^a-zA-Z]',' ',messages['title'][i])", "tensorflow.keras.preprocessing.text import one_hot # Vocabulary size voc_size = 5000 #", "on Thu Feb 11 13:42:45 2021 @author: ASUS \"\"\" import", "in review if word not in stopwords.words('english')] review = \"", "model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64) y_pred = model.predict_classes(X_test) from sklearn.metrics import confusion_matrix, accuracy_score cm", "import pad_sequences from tensorflow.keras.preprocessing.text import one_hot # Vocabulary size voc_size", "not in stopwords.words('english')] review = \" \".join(review) corpus.append(review) onehot_repr =", "review = re.sub('[^a-zA-Z]',' ',messages['title'][i]) review = review.lower() review = review.split()", "1) y = df['label'] import tensorflow as tf from tensorflow.keras.layers", "= 20 embedded_doc = pad_sequences(onehot_repr,maxlen = sent_len,padding = 'pre') #", "from tensorflow.keras.preprocessing.text import one_hot # Vocabulary size voc_size = 5000", "\"\"\" import pandas as pd df = pd.read_csv(r'D:\\nlp\\fake-news-data\\train.csv') df =", "stopwords.words('english')] review = \" \".join(review) corpus.append(review) onehot_repr = [one_hot(words,voc_size) for", "the model embedding_vector_features = 40 model = Sequential() model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len)) model.add(LSTM(100))", "0.33,random_state = 42) model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64) y_pred = model.predict_classes(X_test) from sklearn.metrics import", "Sequential() model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len)) model.add(LSTM(100)) model.add(Dense(1,activation='sigmoid')) model.compile(loss='binary_crossentropy',optimizer = 'adam',metrics = ['accuracy']) model.summary()", "['accuracy']) model.summary() import numpy as np X_final = np.array(embedded_doc) y_final", "nltk import re from nltk.corpus import stopwords # Dataset Preprocessing", "y_test = train_test_split(X_final,y_final,test_size = 0.33,random_state = 42) model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64) y_pred =", "import one_hot # Vocabulary size voc_size = 5000 # One", "corpus = [] for i in range(len(messages)): print(i) review =", "= [] for i in range(len(messages)): print(i) review = re.sub('[^a-zA-Z]','", "import pandas as pd df = pd.read_csv(r'D:\\nlp\\fake-news-data\\train.csv') df = df.dropna()", "in range(len(messages)): print(i) review = re.sub('[^a-zA-Z]',' ',messages['title'][i]) review = review.lower()", "print(i) review = re.sub('[^a-zA-Z]',' ',messages['title'][i]) review = review.lower() review =", "as np X_final = np.array(embedded_doc) y_final = np.array(y) from sklearn.model_selection", "# Vocabulary size voc_size = 5000 # One Hot Representation", "as pd df = pd.read_csv(r'D:\\nlp\\fake-news-data\\train.csv') df = df.dropna() X =", "range(len(messages)): print(i) review = re.sub('[^a-zA-Z]',' 
',messages['title'][i]) review = review.lower() review", "model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len)) model.add(LSTM(100)) model.add(Dense(1,activation='sigmoid')) model.compile(loss='binary_crossentropy',optimizer = 'adam',metrics = ['accuracy']) model.summary() import", "embedding_vector_features = 40 model = Sequential() model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len)) model.add(LSTM(100)) model.add(Dense(1,activation='sigmoid')) model.compile(loss='binary_crossentropy',optimizer", "import Sequential from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.preprocessing.text import one_hot", "13:42:45 2021 @author: ASUS \"\"\" import pandas as pd df", "from nltk.stem import PorterStemmer ps = PorterStemmer() corpus = []", "re.sub('[^a-zA-Z]',' ',messages['title'][i]) review = review.lower() review = review.split() review =", "= [one_hot(words,voc_size) for words in corpus] sent_len = 20 embedded_doc", "word in review if word not in stopwords.words('english')] review =", "= 'pre') # Creating the model embedding_vector_features = 40 model", "# Dataset Preprocessing from nltk.stem import PorterStemmer ps = PorterStemmer()", "= review.lower() review = review.split() review = [ps.stem(word) for word", "One Hot Representation messages = X.copy() messages.reset_index(inplace = True) import", "= 42) model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64) y_pred = model.predict_classes(X_test) from sklearn.metrics import confusion_matrix,", "ASUS \"\"\" import pandas as pd df = pd.read_csv(r'D:\\nlp\\fake-news-data\\train.csv') df", "for i in range(len(messages)): print(i) review = re.sub('[^a-zA-Z]',' ',messages['title'][i]) review", "import PorterStemmer ps = PorterStemmer() corpus = [] for i", "20 embedded_doc = pad_sequences(onehot_repr,maxlen = sent_len,padding = 'pre') # Creating", "Vocabulary size voc_size = 5000 # One Hot Representation messages", "X = df.drop('label',axis = 1) y = df['label'] import tensorflow", "review = [ps.stem(word) for word in review if word not", "model.predict_classes(X_test) from sklearn.metrics import confusion_matrix, accuracy_score cm = confusion_matrix(y_test,y_pred) acc", "df = pd.read_csv(r'D:\\nlp\\fake-news-data\\train.csv') df = df.dropna() X = df.drop('label',axis =", "= 5000 # One Hot Representation messages = X.copy() messages.reset_index(inplace", "train_test_split X_train, X_test, y_train, y_test = train_test_split(X_final,y_final,test_size = 0.33,random_state =", "messages.reset_index(inplace = True) import nltk import re from nltk.corpus import", "PorterStemmer ps = PorterStemmer() corpus = [] for i in", "X.copy() messages.reset_index(inplace = True) import nltk import re from nltk.corpus", "'pre') # Creating the model embedding_vector_features = 40 model =", "df.drop('label',axis = 1) y = df['label'] import tensorflow as tf", "np X_final = np.array(embedded_doc) y_final = np.array(y) from sklearn.model_selection import", "df['label'] import tensorflow as tf from tensorflow.keras.layers import Embedding, Dense,", "nltk.corpus import stopwords # Dataset Preprocessing from nltk.stem import PorterStemmer", "11 13:42:45 2021 @author: ASUS \"\"\" import pandas as pd", "review.split() review = [ps.stem(word) for word in review if word", "import stopwords # Dataset Preprocessing from nltk.stem import PorterStemmer ps", "Thu Feb 11 13:42:45 2021 @author: ASUS \"\"\" import pandas", "Preprocessing from nltk.stem import PorterStemmer 
ps = PorterStemmer() corpus =", "review = review.lower() review = review.split() review = [ps.stem(word) for", "np.array(embedded_doc) y_final = np.array(y) from sklearn.model_selection import train_test_split X_train, X_test,", "corpus.append(review) onehot_repr = [one_hot(words,voc_size) for words in corpus] sent_len =", "[one_hot(words,voc_size) for words in corpus] sent_len = 20 embedded_doc =", "y = df['label'] import tensorflow as tf from tensorflow.keras.layers import", "stopwords # Dataset Preprocessing from nltk.stem import PorterStemmer ps =", "Dataset Preprocessing from nltk.stem import PorterStemmer ps = PorterStemmer() corpus", "model.summary() import numpy as np X_final = np.array(embedded_doc) y_final =", "from sklearn.metrics import confusion_matrix, accuracy_score cm = confusion_matrix(y_test,y_pred) acc =", "for word in review if word not in stopwords.words('english')] review", "ps = PorterStemmer() corpus = [] for i in range(len(messages)):", "sent_len = 20 embedded_doc = pad_sequences(onehot_repr,maxlen = sent_len,padding = 'pre')", "pd.read_csv(r'D:\\nlp\\fake-news-data\\train.csv') df = df.dropna() X = df.drop('label',axis = 1) y", "import numpy as np X_final = np.array(embedded_doc) y_final = np.array(y)", "in stopwords.words('english')] review = \" \".join(review) corpus.append(review) onehot_repr = [one_hot(words,voc_size)", "review.lower() review = review.split() review = [ps.stem(word) for word in", "Feb 11 13:42:45 2021 @author: ASUS \"\"\" import pandas as", "sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X_final,y_final,test_size =", "X_test, y_train, y_test = train_test_split(X_final,y_final,test_size = 0.33,random_state = 42) model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64)" ]
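A short inference sketch, not present in the original script: assuming the names defined above (ps, voc_size, sent_len, model and the preprocessing imports) are in scope, a new headline could be scored as shown below; the helper name score_title and the sample text are made up for illustration.

def score_title(title):
    # Apply the same cleaning/stemming used to build `corpus`.
    words = re.sub('[^a-zA-Z]', ' ', title).lower().split()
    words = [ps.stem(w) for w in words if w not in stopwords.words('english')]
    encoded = [one_hot(" ".join(words), voc_size)]
    padded = pad_sequences(encoded, maxlen=sent_len, padding='pre')
    # Sigmoid output close to 1.0 means the model leans towards label 1.
    return float(model.predict(padded)[0][0])

print(score_title("Breaking: scientists discover water on the moon"))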
[ "named pred from sklearn.metrics import accuracy_score acc = accuracy_score(pred, labels_test)", "################################# ### we handle the import statement and SVC creation", "sklearn.svm import SVC clf = SVC(kernel=\"linear\") #### now your job", "copy import numpy as np import pylab as pl features_train,", "now your job is to fit the classifier #### using", "your job is to fit the classifier #### using the", "########################## SVM ################################# ### we handle the import statement and", "import accuracy_score acc = accuracy_score(pred, labels_test) def submitAccuracy(): return acc", "import makeTerrainData import matplotlib.pyplot as plt import copy import numpy", "import matplotlib.pyplot as plt import copy import numpy as np", "your predictions in a list named pred from sklearn.metrics import", "on the test data clf.fit(features_train,labels_train) pred = clf.predict(features_test) #### store", "pylab as pl features_train, labels_train, features_test, labels_test = makeTerrainData() ##########################", "from sklearn.svm import SVC clf = SVC(kernel=\"linear\") #### now your", "classifier #### using the training features/labels, and to #### make", "and to #### make a set of predictions on the", "you here from sklearn.svm import SVC clf = SVC(kernel=\"linear\") ####", "test data clf.fit(features_train,labels_train) pred = clf.predict(features_test) #### store your predictions", "#### store your predictions in a list named pred from", "import pylab as pl features_train, labels_train, features_test, labels_test = makeTerrainData()", "from prep_terrain_data import makeTerrainData import matplotlib.pyplot as plt import copy", "of predictions on the test data clf.fit(features_train,labels_train) pred = clf.predict(features_test)", "statement and SVC creation for you here from sklearn.svm import", "as pl features_train, labels_train, features_test, labels_test = makeTerrainData() ########################## SVM", "handle the import statement and SVC creation for you here", "pred = clf.predict(features_test) #### store your predictions in a list", "store your predictions in a list named pred from sklearn.metrics", "#### using the training features/labels, and to #### make a", "job is to fit the classifier #### using the training", "list named pred from sklearn.metrics import accuracy_score acc = accuracy_score(pred,", "SVC creation for you here from sklearn.svm import SVC clf", "import statement and SVC creation for you here from sklearn.svm", "np import pylab as pl features_train, labels_train, features_test, labels_test =", "= SVC(kernel=\"linear\") #### now your job is to fit the", "for you here from sklearn.svm import SVC clf = SVC(kernel=\"linear\")", "makeTerrainData import matplotlib.pyplot as plt import copy import numpy as", "SVM ################################# ### we handle the import statement and SVC", "sklearn.metrics import accuracy_score acc = accuracy_score(pred, labels_test) def submitAccuracy(): return", "make a set of predictions on the test data clf.fit(features_train,labels_train)", "predictions on the test data clf.fit(features_train,labels_train) pred = clf.predict(features_test) ####", "= makeTerrainData() ########################## SVM ################################# ### we handle the import", "from class_vis import prettyPicture from prep_terrain_data import makeTerrainData import matplotlib.pyplot", "predictions in a list named pred from sklearn.metrics import accuracy_score", "SVC(kernel=\"linear\") #### now your job 
is to fit the classifier", "import prettyPicture from prep_terrain_data import makeTerrainData import matplotlib.pyplot as plt", "clf.predict(features_test) #### store your predictions in a list named pred", "pred from sklearn.metrics import accuracy_score acc = accuracy_score(pred, labels_test) def", "makeTerrainData() ########################## SVM ################################# ### we handle the import statement", "the test data clf.fit(features_train,labels_train) pred = clf.predict(features_test) #### store your", "prettyPicture from prep_terrain_data import makeTerrainData import matplotlib.pyplot as plt import", "fit the classifier #### using the training features/labels, and to", "#### now your job is to fit the classifier ####", "matplotlib.pyplot as plt import copy import numpy as np import", "sys from class_vis import prettyPicture from prep_terrain_data import makeTerrainData import", "and SVC creation for you here from sklearn.svm import SVC", "data clf.fit(features_train,labels_train) pred = clf.predict(features_test) #### store your predictions in", "import sys from class_vis import prettyPicture from prep_terrain_data import makeTerrainData", "here from sklearn.svm import SVC clf = SVC(kernel=\"linear\") #### now", "prep_terrain_data import makeTerrainData import matplotlib.pyplot as plt import copy import", "features/labels, and to #### make a set of predictions on", "class_vis import prettyPicture from prep_terrain_data import makeTerrainData import matplotlib.pyplot as", "a list named pred from sklearn.metrics import accuracy_score acc =", "#### make a set of predictions on the test data", "creation for you here from sklearn.svm import SVC clf =", "pl features_train, labels_train, features_test, labels_test = makeTerrainData() ########################## SVM #################################", "we handle the import statement and SVC creation for you", "training features/labels, and to #### make a set of predictions", "import numpy as np import pylab as pl features_train, labels_train,", "as np import pylab as pl features_train, labels_train, features_test, labels_test", "clf = SVC(kernel=\"linear\") #### now your job is to fit", "set of predictions on the test data clf.fit(features_train,labels_train) pred =", "import copy import numpy as np import pylab as pl", "labels_test = makeTerrainData() ########################## SVM ################################# ### we handle the", "### we handle the import statement and SVC creation for", "the training features/labels, and to #### make a set of", "in a list named pred from sklearn.metrics import accuracy_score acc", "SVC clf = SVC(kernel=\"linear\") #### now your job is to", "numpy as np import pylab as pl features_train, labels_train, features_test,", "using the training features/labels, and to #### make a set", "is to fit the classifier #### using the training features/labels,", "features_train, labels_train, features_test, labels_test = makeTerrainData() ########################## SVM ################################# ###", "to #### make a set of predictions on the test", "to fit the classifier #### using the training features/labels, and", "plt import copy import numpy as np import pylab as", "a set of predictions on the test data clf.fit(features_train,labels_train) pred", "the import statement and SVC creation for you here from", "features_test, labels_test = makeTerrainData() ########################## SVM ################################# ### we handle", "from sklearn.metrics import accuracy_score acc = 
accuracy_score(pred, labels_test) def submitAccuracy():", "import SVC clf = SVC(kernel=\"linear\") #### now your job is", "as plt import copy import numpy as np import pylab", "the classifier #### using the training features/labels, and to ####", "= clf.predict(features_test) #### store your predictions in a list named", "labels_train, features_test, labels_test = makeTerrainData() ########################## SVM ################################# ### we", "clf.fit(features_train,labels_train) pred = clf.predict(features_test) #### store your predictions in a" ]
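A small follow-up sketch, not part of the original exercise skeleton: since accuracy_score(pred, labels_test) equals clf.score(features_test, labels_test) here, the same terrain data can be reused to compare kernels.

for kernel in ("linear", "rbf"):
    candidate = SVC(kernel=kernel)   # "rbf" is sklearn's default kernel
    candidate.fit(features_train, labels_train)
    print(kernel, candidate.score(features_test, labels_test))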
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "= draw( st.lists( st.integers( min_value=20, max_value=100), min_size=4, max_size=4)) input_spec =", "OPset version: 7, 9, 15 \"\"\" def sample_convert_config(self, draw): input_shape", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "All Rights Reserved. # # Licensed under the Apache License,", "2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under", "file except in compliance with the License. # You may", "agreed to in writing, software # distributed under the License", "Unless required by applicable law or agreed to in writing,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Licensed under the Apache License, Version 2.0 (the \"License\" #", "[[dtype]], \"opset_version\": [7, 9, 15], \"input_spec_shape\": [input_spec], } models =", "7, 9, 15 \"\"\" def sample_convert_config(self, draw): input_shape = draw(", "(c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed", "distributed under the License is distributed on an \"AS IS\"", "dtype = draw(st.sampled_from([\"float32\", \"float64\"])) config = { \"op_names\": [\"logsigmoid\"], \"test_data_shapes\":", "the specific language governing permissions and # limitations under the", "governing permissions and # limitations under the License. from auto_scan_test", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "\"test_data_shapes\": [input_shape], \"test_data_types\": [[dtype]], \"opset_version\": [7, 9, 15], \"input_spec_shape\": [input_spec],", "input_shape = draw( st.lists( st.integers( min_value=20, max_value=100), min_size=4, max_size=4)) input_spec", "[input_spec], } models = Net(config) return (config, models) def test(self):", "st import numpy as np import unittest import paddle class", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "import hypothesis.strategies as st import numpy as np import unittest", "st.integers( min_value=20, max_value=100), min_size=4, max_size=4)) input_spec = [-1] * len(input_shape)", "except in compliance with the License. # You may obtain", "= paddle.nn.functional.log_sigmoid(inputs) return x class TestLogsigmoidConvert(OPConvertAutoScanTest): \"\"\" api: paddle.nn.functional.log_sigmoid OPset", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "sample_convert_config(self, draw): input_shape = draw( st.lists( st.integers( min_value=20, max_value=100), min_size=4,", "permissions and # limitations under the License. from auto_scan_test import", "input_spec = [-1] * len(input_shape) dtype = draw(st.sampled_from([\"float32\", \"float64\"])) config", "= [-1] * len(input_shape) dtype = draw(st.sampled_from([\"float32\", \"float64\"])) config =", "PaddlePaddle Authors. All Rights Reserved. # # Licensed under the", "and # limitations under the License. from auto_scan_test import OPConvertAutoScanTest,", "not use this file except in compliance with the License.", "import paddle class Net(BaseNet): \"\"\" simple Net \"\"\" def forward(self,", "Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
# #", "TestLogsigmoidConvert(OPConvertAutoScanTest): \"\"\" api: paddle.nn.functional.log_sigmoid OPset version: 7, 9, 15 \"\"\"", "writing, software # distributed under the License is distributed on", "in writing, software # distributed under the License is distributed", "numpy as np import unittest import paddle class Net(BaseNet): \"\"\"", "you may not use this file except in compliance with", "Version 2.0 (the \"License\" # you may not use this", "language governing permissions and # limitations under the License. from", "= { \"op_names\": [\"logsigmoid\"], \"test_data_shapes\": [input_shape], \"test_data_types\": [[dtype]], \"opset_version\": [7,", "use this file except in compliance with the License. #", "[7, 9, 15], \"input_spec_shape\": [input_spec], } models = Net(config) return", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "\"op_names\": [\"logsigmoid\"], \"test_data_shapes\": [input_shape], \"test_data_types\": [[dtype]], \"opset_version\": [7, 9, 15],", "class Net(BaseNet): \"\"\" simple Net \"\"\" def forward(self, inputs): \"\"\"", "\"\"\" api: paddle.nn.functional.log_sigmoid OPset version: 7, 9, 15 \"\"\" def", "} models = Net(config) return (config, models) def test(self): self.run_and_statis(max_examples=30)", "15 \"\"\" def sample_convert_config(self, draw): input_shape = draw( st.lists( st.integers(", "<gh_stars>0 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.", "License. from auto_scan_test import OPConvertAutoScanTest, BaseNet from hypothesis import reproduce_failure", "as np import unittest import paddle class Net(BaseNet): \"\"\" simple", "CONDITIONS OF ANY KIND, either express or implied. # See", "def sample_convert_config(self, draw): input_shape = draw( st.lists( st.integers( min_value=20, max_value=100),", "api: paddle.nn.functional.log_sigmoid OPset version: 7, 9, 15 \"\"\" def sample_convert_config(self,", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "= draw(st.sampled_from([\"float32\", \"float64\"])) config = { \"op_names\": [\"logsigmoid\"], \"test_data_shapes\": [input_shape],", "import numpy as np import unittest import paddle class Net(BaseNet):", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "Rights Reserved. # # Licensed under the Apache License, Version", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "Net \"\"\" def forward(self, inputs): \"\"\" forward \"\"\" x =", "under the Apache License, Version 2.0 (the \"License\" # you", "# You may obtain a copy of the License at", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "unittest import paddle class Net(BaseNet): \"\"\" simple Net \"\"\" def", "inputs): \"\"\" forward \"\"\" x = paddle.nn.functional.log_sigmoid(inputs) return x class", "return (config, models) def test(self): self.run_and_statis(max_examples=30) if __name__ == \"__main__\":", "under the License is distributed on an \"AS IS\" BASIS,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "min_size=4, max_size=4)) input_spec = [-1] * len(input_shape) dtype = draw(st.sampled_from([\"float32\",", "Authors. All Rights Reserved. 
# # Licensed under the Apache", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "import reproduce_failure import hypothesis.strategies as st import numpy as np", "under the License. from auto_scan_test import OPConvertAutoScanTest, BaseNet from hypothesis", "the License. from auto_scan_test import OPConvertAutoScanTest, BaseNet from hypothesis import", "Reserved. # # Licensed under the Apache License, Version 2.0", "hypothesis.strategies as st import numpy as np import unittest import", "limitations under the License. from auto_scan_test import OPConvertAutoScanTest, BaseNet from", "hypothesis import reproduce_failure import hypothesis.strategies as st import numpy as", "np import unittest import paddle class Net(BaseNet): \"\"\" simple Net", "forward(self, inputs): \"\"\" forward \"\"\" x = paddle.nn.functional.log_sigmoid(inputs) return x", "the License for the specific language governing permissions and #", "# you may not use this file except in compliance", "either express or implied. # See the License for the", "config = { \"op_names\": [\"logsigmoid\"], \"test_data_shapes\": [input_shape], \"test_data_types\": [[dtype]], \"opset_version\":", "OR CONDITIONS OF ANY KIND, either express or implied. #", "max_value=100), min_size=4, max_size=4)) input_spec = [-1] * len(input_shape) dtype =", "= Net(config) return (config, models) def test(self): self.run_and_statis(max_examples=30) if __name__", "Net(config) return (config, models) def test(self): self.run_and_statis(max_examples=30) if __name__ ==", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "models = Net(config) return (config, models) def test(self): self.run_and_statis(max_examples=30) if", "the License is distributed on an \"AS IS\" BASIS, #", "in compliance with the License. 
# You may obtain a", "[input_shape], \"test_data_types\": [[dtype]], \"opset_version\": [7, 9, 15], \"input_spec_shape\": [input_spec], }", "len(input_shape) dtype = draw(st.sampled_from([\"float32\", \"float64\"])) config = { \"op_names\": [\"logsigmoid\"],", "return x class TestLogsigmoidConvert(OPConvertAutoScanTest): \"\"\" api: paddle.nn.functional.log_sigmoid OPset version: 7,", "software # distributed under the License is distributed on an", "(the \"License\" # you may not use this file except", "x = paddle.nn.functional.log_sigmoid(inputs) return x class TestLogsigmoidConvert(OPConvertAutoScanTest): \"\"\" api: paddle.nn.functional.log_sigmoid", "# # Unless required by applicable law or agreed to", "paddle.nn.functional.log_sigmoid OPset version: 7, 9, 15 \"\"\" def sample_convert_config(self, draw):", "\"\"\" def sample_convert_config(self, draw): input_shape = draw( st.lists( st.integers( min_value=20,", "min_value=20, max_value=100), min_size=4, max_size=4)) input_spec = [-1] * len(input_shape) dtype", "2.0 (the \"License\" # you may not use this file", "draw( st.lists( st.integers( min_value=20, max_value=100), min_size=4, max_size=4)) input_spec = [-1]", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "OPConvertAutoScanTest, BaseNet from hypothesis import reproduce_failure import hypothesis.strategies as st", "# Licensed under the Apache License, Version 2.0 (the \"License\"", "* len(input_shape) dtype = draw(st.sampled_from([\"float32\", \"float64\"])) config = { \"op_names\":", "x class TestLogsigmoidConvert(OPConvertAutoScanTest): \"\"\" api: paddle.nn.functional.log_sigmoid OPset version: 7, 9,", "class TestLogsigmoidConvert(OPConvertAutoScanTest): \"\"\" api: paddle.nn.functional.log_sigmoid OPset version: 7, 9, 15", "\"input_spec_shape\": [input_spec], } models = Net(config) return (config, models) def", "\"float64\"])) config = { \"op_names\": [\"logsigmoid\"], \"test_data_shapes\": [input_shape], \"test_data_types\": [[dtype]],", "law or agreed to in writing, software # distributed under", "{ \"op_names\": [\"logsigmoid\"], \"test_data_shapes\": [input_shape], \"test_data_types\": [[dtype]], \"opset_version\": [7, 9,", "\"opset_version\": [7, 9, 15], \"input_spec_shape\": [input_spec], } models = Net(config)", "[-1] * len(input_shape) dtype = draw(st.sampled_from([\"float32\", \"float64\"])) config = {", "simple Net \"\"\" def forward(self, inputs): \"\"\" forward \"\"\" x", "implied. 
# See the License for the specific language governing", "\"\"\" simple Net \"\"\" def forward(self, inputs): \"\"\" forward \"\"\"", "Net(BaseNet): \"\"\" simple Net \"\"\" def forward(self, inputs): \"\"\" forward", "9, 15 \"\"\" def sample_convert_config(self, draw): input_shape = draw( st.lists(", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "BaseNet from hypothesis import reproduce_failure import hypothesis.strategies as st import", "[\"logsigmoid\"], \"test_data_shapes\": [input_shape], \"test_data_types\": [[dtype]], \"opset_version\": [7, 9, 15], \"input_spec_shape\":", "\"\"\" forward \"\"\" x = paddle.nn.functional.log_sigmoid(inputs) return x class TestLogsigmoidConvert(OPConvertAutoScanTest):", "\"test_data_types\": [[dtype]], \"opset_version\": [7, 9, 15], \"input_spec_shape\": [input_spec], } models", "draw(st.sampled_from([\"float32\", \"float64\"])) config = { \"op_names\": [\"logsigmoid\"], \"test_data_shapes\": [input_shape], \"test_data_types\":", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "import OPConvertAutoScanTest, BaseNet from hypothesis import reproduce_failure import hypothesis.strategies as", "from auto_scan_test import OPConvertAutoScanTest, BaseNet from hypothesis import reproduce_failure import", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "reproduce_failure import hypothesis.strategies as st import numpy as np import", "\"\"\" x = paddle.nn.functional.log_sigmoid(inputs) return x class TestLogsigmoidConvert(OPConvertAutoScanTest): \"\"\" api:", "the Apache License, Version 2.0 (the \"License\" # you may", "auto_scan_test import OPConvertAutoScanTest, BaseNet from hypothesis import reproduce_failure import hypothesis.strategies", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "9, 15], \"input_spec_shape\": [input_spec], } models = Net(config) return (config,", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "paddle.nn.functional.log_sigmoid(inputs) return x class TestLogsigmoidConvert(OPConvertAutoScanTest): \"\"\" api: paddle.nn.functional.log_sigmoid OPset version:", "to in writing, software # distributed under the License is", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "forward \"\"\" x = paddle.nn.functional.log_sigmoid(inputs) return x class TestLogsigmoidConvert(OPConvertAutoScanTest): \"\"\"", "You may obtain a copy of the License at #", "(config, models) def test(self): self.run_and_statis(max_examples=30) if __name__ == \"__main__\": unittest.main()", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "# limitations under the License. 
from auto_scan_test import OPConvertAutoScanTest, BaseNet", "Apache License, Version 2.0 (the \"License\" # you may not", "max_size=4)) input_spec = [-1] * len(input_shape) dtype = draw(st.sampled_from([\"float32\", \"float64\"]))", "required by applicable law or agreed to in writing, software", "paddle class Net(BaseNet): \"\"\" simple Net \"\"\" def forward(self, inputs):", "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. #", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "def forward(self, inputs): \"\"\" forward \"\"\" x = paddle.nn.functional.log_sigmoid(inputs) return", "with the License. # You may obtain a copy of", "this file except in compliance with the License. # You", "st.lists( st.integers( min_value=20, max_value=100), min_size=4, max_size=4)) input_spec = [-1] *", "from hypothesis import reproduce_failure import hypothesis.strategies as st import numpy", "as st import numpy as np import unittest import paddle", "version: 7, 9, 15 \"\"\" def sample_convert_config(self, draw): input_shape =", "draw): input_shape = draw( st.lists( st.integers( min_value=20, max_value=100), min_size=4, max_size=4))", "License, Version 2.0 (the \"License\" # you may not use", "\"License\" # you may not use this file except in", "import unittest import paddle class Net(BaseNet): \"\"\" simple Net \"\"\"", "15], \"input_spec_shape\": [input_spec], } models = Net(config) return (config, models)", "\"\"\" def forward(self, inputs): \"\"\" forward \"\"\" x = paddle.nn.functional.log_sigmoid(inputs)" ]
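For context on the operator exercised by the auto-scan test above, here is an illustrative numeric identity (not part of the test file): log_sigmoid(x) is log(1 / (1 + exp(-x))), which the exported ONNX graph is expected to reproduce for opsets 7, 9 and 15.

import numpy as np
import paddle

x = np.linspace(-5.0, 5.0, num=7).astype("float32")
expected = -np.log1p(np.exp(-x))   # log(sigmoid(x)) written in a numerically stable form
actual = paddle.nn.functional.log_sigmoid(paddle.to_tensor(x)).numpy()
print(np.allclose(actual, expected, atol=1e-6))   # should print True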
[ "Task function/method key - getter only. :getter: Gets the task", "= threads def __call__(self, signum, frame): self.stopper.set() for task in", "concurrently via Python ``multiprocessing`` processes, puts the results into a", "stopper = Event() threads = tuple(Thread(target=run, args=(i, task_q, result_q, stopper,))", "from __future__ import division from __future__ import print_function from __future__", "key is not None else func.__name__ self._result = None self._is_done", "the task function/method result (produced by calling the function on", "absolute_import from __future__ import division from __future__ import print_function from", "r): self._result = r self._is_done = True @property def is_done(self):", "task_q, result_q, stopper): while not stopper.is_set(): try: task = task_q.get_nowait()", "from builtins import str from future import standard_library standard_library.install_aliases() try:", "Task function/method arguments property - getter only. :getter: Gets the", "= pool.apply_async(task.func, args=task.args, callback=build_results) run.get() pool.close() pool.join() while not result_q.empty():", "__init__(self, func, args=(), key=None): self._func = func self._args = args", "generates these back to the caller. \"\"\" task_q = Queue()", "task.result = task.func(*task.args) if task.args else task.func() if type(task.result) in", "import print_function from __future__ import unicode_literals from builtins import open", "and generates these back to the caller. \"\"\" pool =", "func.__name__ self._result = None self._is_done = False @property def func(self):", "property - getter only. :getter: Gets the task function/method status", "result property. :getter: Gets the task function/method result (produced by", "result \"\"\" return self._result @result.setter def result(self, r): self._result =", "pool_size=10): \"\"\" Executes several tasks concurrently via Python ``multiprocessing`` processes,", "function/method object \"\"\" return self._func @property def args(self): \"\"\" Task", "the caller. 
\"\"\" task_q = Queue() num_tasks = 0 for", "threads: thread.start() task_q.join() while not result_q.empty(): key, result = result_q.get_nowait()", "Queue() def build_results(result): if type(result) in (types.GeneratorType, list, tuple, set):", "``multiprocessing`` processes, puts the results into a queue, and generates", "self.stopper.set() for task in self.threads: task.join() sys.exit(0) class Task(object): def", "the function on the defined arguments) :setter: Sets the task", "return self._key @property def result(self): \"\"\" Task function/method result property.", "function/method status \"\"\" return self._is_done def multithread(tasks, pool_size=10): \"\"\" Executes", "in (types.GeneratorType, list, tuple, set): for r in result: result_q.put(r)", "from Queue import Queue, Empty import sys import types import", "(produced by calling the function on the defined arguments) :setter:", "self._key = key if key is not None else func.__name__", "for r in task.result: result_q.put((task.key, r,)) else: result_q.put((task.key, task.result,)) task_q.task_done()", "results into a queue, and generates these back to the", "] class SignalHandler(object): def __init__(self, stopper, threads): self.stopper = stopper", "= r self._is_done = True @property def is_done(self): \"\"\" Task", "task_q.join() while not result_q.empty(): key, result = result_q.get_nowait() yield key,", "(types.GeneratorType, list, tuple, set): for r in result: result_q.put(r) else:", "if key is not None else func.__name__ self._result = None", "result_q.put(r) else: result_q.put(result) for task in tasks: run = pool.apply_async(task.func,", "'multiprocess', 'multithread', 'SignalHandler', 'Task' ] class SignalHandler(object): def __init__(self, stopper,", "thread.start() task_q.join() while not result_q.empty(): key, result = result_q.get_nowait() yield", "arguments \"\"\" return self._args @property def key(self): \"\"\" Task function/method", "= 0 for task in tasks: task_q.put(task) num_tasks += 1", "else task.func() if type(task.result) in (types.GeneratorType, list, tuple, set): for", "self._func @property def args(self): \"\"\" Task function/method arguments property -", "Task(object): def __init__(self, func, args=(), key=None): self._func = func self._args", "type(result) in (types.GeneratorType, list, tuple, set): for r in result:", "from __future__ import absolute_import from __future__ import division from __future__", "try: task = task_q.get_nowait() except Empty: break else: task.result =", "range(pool_size)) handler = SignalHandler(stopper, threads) signal(SIGINT, handler) for thread in", "task_q.get_nowait() except Empty: break else: task.result = task.func(*task.args) if task.args", "Empty except ImportError: from Queue import Queue, Empty import sys", "self._key @property def result(self): \"\"\" Task function/method result property. 
:getter:", "'SignalHandler', 'Task' ] class SignalHandler(object): def __init__(self, stopper, threads): self.stopper", "except Empty: break else: task.result = task.func(*task.args) if task.args else", "return self._args @property def key(self): \"\"\" Task function/method key -", "concurrently via ``threading`` threads, puts the results into a queue,", ":getter: Gets the task function/method arguments \"\"\" return self._args @property", "Sets the task function/method result \"\"\" return self._result @result.setter def", "utf-8 -*- from __future__ import absolute_import from __future__ import division", "\"\"\" pool = billiard.Pool(pool_size) result_q = Queue() def build_results(result): if", "status property - getter only. :getter: Gets the task function/method", "yield key, result def multiprocess(tasks, pool_size=10): \"\"\" Executes several tasks", "def __init__(self, stopper, threads): self.stopper = stopper self.threads = threads", "is_done(self): \"\"\" Task function/method status property - getter only. :getter:", "division from __future__ import print_function from __future__ import unicode_literals from", "task function/method object \"\"\" return self._func @property def args(self): \"\"\"", "\"\"\" return self._args @property def key(self): \"\"\" Task function/method key", "arguments property - getter only. :getter: Gets the task function/method", "caller. \"\"\" task_q = Queue() num_tasks = 0 for task", "function/method key \"\"\" return self._key @property def result(self): \"\"\" Task", "try: from queue import Queue, Empty except ImportError: from Queue", ":getter: Gets the task function/method object \"\"\" return self._func @property", "getter only. :getter: Gets the task function/method object \"\"\" return", "\"\"\" return self._key @property def result(self): \"\"\" Task function/method result", "Empty import sys import types import billiard from signal import", "function/method key - getter only. :getter: Gets the task function/method", "result = result_q.get_nowait() yield key, result def multiprocess(tasks, pool_size=10): \"\"\"", "args=(), key=None): self._func = func self._args = args self._key =", ":setter: Sets the task function/method result \"\"\" return self._result @result.setter", "not stopper.is_set(): try: task = task_q.get_nowait() except Empty: break else:", "result_q, stopper,)) for i in range(pool_size)) handler = SignalHandler(stopper, threads)", "str from future import standard_library standard_library.install_aliases() try: from queue import", "on the defined arguments) :setter: Sets the task function/method result", "back to the caller. \"\"\" task_q = Queue() num_tasks =", "def is_done(self): \"\"\" Task function/method status property - getter only.", "import types import billiard from signal import ( signal, SIGINT,", "in self.threads: task.join() sys.exit(0) class Task(object): def __init__(self, func, args=(),", "import ( signal, SIGINT, ) from threading import ( Event,", "0 for task in tasks: task_q.put(task) num_tasks += 1 def", "self._result = r self._is_done = True @property def is_done(self): \"\"\"", "num_tasks += 1 def run(i, task_q, result_q, stopper): while not", "function/method result (produced by calling the function on the defined", "task in tasks: task_q.put(task) num_tasks += 1 def run(i, task_q,", "getter only. :getter: Gets the task function/method key \"\"\" return", "\"\"\" Task function/method property - getter only. 
:getter: Gets the", "@property def key(self): \"\"\" Task function/method key - getter only.", "__future__ import print_function from __future__ import unicode_literals from builtins import", "task_q, result_q, stopper,)) for i in range(pool_size)) handler = SignalHandler(stopper,", "tuple, set): for r in task.result: result_q.put((task.key, r,)) else: result_q.put((task.key,", "threads def __call__(self, signum, frame): self.stopper.set() for task in self.threads:", "result_q = Queue() def build_results(result): if type(result) in (types.GeneratorType, list,", "- getter only. :getter: Gets the task function/method object \"\"\"", "stopper, threads): self.stopper = stopper self.threads = threads def __call__(self,", "i in range(pool_size)) handler = SignalHandler(stopper, threads) signal(SIGINT, handler) for", "( signal, SIGINT, ) from threading import ( Event, Thread,", "task function/method status \"\"\" return self._is_done def multithread(tasks, pool_size=10): \"\"\"", "task = task_q.get_nowait() except Empty: break else: task.result = task.func(*task.args)", "return self._func @property def args(self): \"\"\" Task function/method arguments property", "self.stopper = stopper self.threads = threads def __call__(self, signum, frame):", "Gets the task function/method status \"\"\" return self._is_done def multithread(tasks,", "self._func = func self._args = args self._key = key if", "builtins import open from builtins import str from future import", "task.join() sys.exit(0) class Task(object): def __init__(self, func, args=(), key=None): self._func", "Queue, Empty except ImportError: from Queue import Queue, Empty import", "caller. \"\"\" pool = billiard.Pool(pool_size) result_q = Queue() def build_results(result):", "several tasks concurrently via ``threading`` threads, puts the results into", "import open from builtins import str from future import standard_library", "return self._result @result.setter def result(self, r): self._result = r self._is_done", "key(self): \"\"\" Task function/method key - getter only. :getter: Gets", "def __init__(self, func, args=(), key=None): self._func = func self._args =", "import str from future import standard_library standard_library.install_aliases() try: from queue", "= billiard.Pool(pool_size) result_q = Queue() def build_results(result): if type(result) in", "task in tasks: run = pool.apply_async(task.func, args=task.args, callback=build_results) run.get() pool.close()", ":getter: Gets the task function/method status \"\"\" return self._is_done def", "list, tuple, set): for r in task.result: result_q.put((task.key, r,)) else:", "threads, puts the results into a queue, and generates these", "func(self): \"\"\" Task function/method property - getter only. :getter: Gets", "back to the caller. \"\"\" pool = billiard.Pool(pool_size) result_q =", "task.result: result_q.put((task.key, r,)) else: result_q.put((task.key, task.result,)) task_q.task_done() result_q = Queue()", "only. 
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from builtins import open
from builtins import str

from future import standard_library

standard_library.install_aliases()

try:
    from queue import Queue, Empty
except ImportError:
    from Queue import Queue, Empty

import sys
import types

import billiard

from signal import (
    signal,
    SIGINT,
)
from threading import (
    Event,
    Thread,
)

__all__ = [
    'multiprocess',
    'multithread',
    'SignalHandler',
    'Task'
]


class SignalHandler(object):

    def __init__(self, stopper, threads):
        self.stopper = stopper
        self.threads = threads

    def __call__(self, signum, frame):
        self.stopper.set()
        for task in self.threads:
            task.join()
        sys.exit(0)


class Task(object):

    def __init__(self, func, args=(), key=None):
        self._func = func
        self._args = args
        self._key = key if key is not None else func.__name__
        self._result = None
        self._is_done = False

    @property
    def func(self):
        """
        Task function/method property - getter only.

        :getter: Gets the task function/method object
        """
        return self._func

    @property
    def args(self):
        """
        Task function/method arguments property - getter only.

        :getter: Gets the task function/method arguments
        """
        return self._args

    @property
    def key(self):
        """
        Task function/method key - getter only.

        :getter: Gets the task function/method key
        """
        return self._key

    @property
    def result(self):
        """
        Task function/method result property.

        :getter: Gets the task function/method result (produced by calling
            the function on the defined arguments)
        :setter: Sets the task function/method result
        """
        return self._result

    @result.setter
    def result(self, r):
        self._result = r
        self._is_done = True

    @property
    def is_done(self):
        """
        Task function/method status property - getter only.

        :getter: Gets the task function/method status
        """
        return self._is_done


def multithread(tasks, pool_size=10):
    """
    Executes several tasks concurrently via ``threading`` threads, puts
    the results into a queue, and generates these back to the caller.
    """
    task_q = Queue()
    num_tasks = 0
    for task in tasks:
        task_q.put(task)
        num_tasks += 1

    def run(i, task_q, result_q, stopper):
        while not stopper.is_set():
            try:
                task = task_q.get_nowait()
            except Empty:
                break
            else:
                task.result = task.func(*task.args) if task.args else task.func()
                if type(task.result) in (types.GeneratorType, list, tuple, set):
                    for r in task.result:
                        result_q.put((task.key, r,))
                else:
                    result_q.put((task.key, task.result,))
                task_q.task_done()

    result_q = Queue()
    stopper = Event()
    threads = tuple(Thread(target=run, args=(i, task_q, result_q, stopper,)) for i in range(pool_size))
    handler = SignalHandler(stopper, threads)
    signal(SIGINT, handler)

    for thread in threads:
        thread.start()

    task_q.join()

    while not result_q.empty():
        key, result = result_q.get_nowait()
        yield key, result


def multiprocess(tasks, pool_size=10):
    """
    Executes several tasks concurrently via Python ``multiprocessing``
    processes, puts the results into a queue, and generates these back
    to the caller.
    """
    pool = billiard.Pool(pool_size)
    result_q = Queue()

    def build_results(result):
        if type(result) in (types.GeneratorType, list, tuple, set):
            for r in result:
                result_q.put(r)
        else:
            result_q.put(result)

    for task in tasks:
        run = pool.apply_async(task.func, args=task.args, callback=build_results)
        run.get()

    pool.close()
    pool.join()

    while not result_q.empty():
        result = result_q.get_nowait()
        yield result
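A minimal usage sketch of the Task/multithread API shown above: the square() helper, the task keys, and pool_size=2 are illustrative assumptions, not taken from the source.

# Minimal usage sketch (assumption: Task and multithread from the module above are in scope).
# square(), the "square-N" keys, and pool_size=2 are illustrative, not from the source.
def square(x):
    return x * x

# Each Task wraps a callable, its positional arguments, and an optional key
# that labels the corresponding results.
tasks = [Task(square, args=(n,), key="square-{}".format(n)) for n in range(5)]

# multithread() is a generator: it drains the task queue on a small thread
# pool and yields (key, result) tuples back to the caller as it empties the
# internal result queue.
for key, value in multithread(tasks, pool_size=2):
    print(key, value)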
[ "add_to_line as with_comments from .identify import STATEMENT_DECLARATIONS from .settings import", "= True elif config.force_single_line and module not in config.single_line_exclusions: import_statement", "line: continue if line.startswith(\"#\"): comments_above.append(line) elif comments_above: new_section_output.append(_LineWithComments(line, comments_above)) comments_above", "continue if line.startswith(\"#\"): comments_above.append(line) elif comments_above: new_section_output.append(_LineWithComments(line, comments_above)) comments_above =", "if inline_comments: combined_inline_comments = \" \".join(inline_comments) else: combined_inline_comments = \"\"", "not in config.single_line_exclusions: import_statement = \"\" while from_imports: from_import =", "config.combine_star: import_statement = wrap.line( with_comments( _with_star_comments(parsed, module, list(comments or ())),", "output.append(wrap.line(single_import_line, parsed.line_separator, config)) from_import_section = [] while from_imports and (", "output.append( wrap.line(single_import_line, parsed.line_separator, config) ) from_comments = parsed.categorized_comments[\"straight\"].get( f\"{module}.{from_import}\" )", "False for line, prev_line in zip(output, [None] + output): #", "str: \"\"\"Adds the imports back to the file. (at the", "{section_title}\" if section_comment not in parsed.lines_without_imports[0:1]: # pragma: no branch", "f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix, ), parsed.line_separator, config, ) from_imports = [", "if is_comment(line) and prev_line != \"\" and not is_comment(prev_line): new_output.append(\"\")", "else: formatted_output[imports_tail:0] = [\"\"] if parsed.place_imports: new_out_lines = [] for", "and len(import_statement) > config.line_length: import_statement = wrap.line(import_statement, parsed.line_separator, config) if", "\"pyi\" and next_construct.startswith(STATEMENT_DECLARATIONS): formatted_output[imports_tail:0] = [\"\", \"\"] else: formatted_output[imports_tail:0] =", "List[str]) -> List[str]: while lines and lines[-1].strip() == \"\": lines.pop(-1)", "if not from_import_section: import_statement = \"\" do_multiline_reformat = False force_grid_wrap", "List[str], line_separator: str) -> str: return line_separator.join(_normalize_empty_lines(lines)) def _normalize_empty_lines(lines: List[str])", "List[str] def __new__( cls: Type[\"_LineWithComments\"], value: Any, comments: List[str] )", "sorting.sort( config, from_imports, key=lambda key: sorting.module_key( key, config, True, config.force_alphabetical_sort_within_sections,", "comments_above: output.extend(comments_above) output.extend( with_comments( parsed.categorized_comments[\"straight\"].get(imodule), idef, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for", ". 
import parse, sorting, wrap from .comments import add_to_line as", "comment_prefix=config.comment_prefix, ) single_import_line += ( f\"{use_comments and ';' or config.comment_prefix}", "as_imports: if not straight_modules: return [] above_comments: List[str] = []", "line in new_section_output: comments = getattr(line, \"comments\", ()) if comments:", "force_grid_wrap = config.force_grid_wrap if force_grid_wrap and len(from_import_section) >= force_grid_wrap: do_multiline_reformat", "from_imports[0] in as_imports: from_import = from_imports.pop(0) if not config.only_sections: as_imports[from_import]", "[\"\"] * ( config.lines_between_types if from_modules and straight_modules else 0", "from_modules, section, remove_imports, import_type ) lines_between = [\"\"] * (", "1].strip() ): continue next_construct = line break if in_quote: #", "output.append(f\"{import_type} {combined_straight_imports}\") return output for module in straight_modules: if module", "in parsed.categorized_comments[\"straight\"]: inline_comments.extend(parsed.categorized_comments[\"straight\"][module]) combined_straight_imports = \", \".join(straight_modules) if inline_comments: combined_inline_comments", "from_imports.pop(0) single_import_line = with_comments( comments, import_start + from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix,", "comments: List[str]) -> List[str]: star_comment = parsed.categorized_comments[\"nested\"].get(module, {}).pop(\"*\", None) if", "List[str]) -> List[str]: star_comment = parsed.categorized_comments[\"nested\"].get(module, {}).pop(\"*\", None) if star_comment:", "parsed.categorized_comments[\"above\"][\"straight\"]: above_comments.extend(parsed.categorized_comments[\"above\"][\"straight\"].pop(module)) if module in parsed.categorized_comments[\"straight\"]: inline_comments.extend(parsed.categorized_comments[\"straight\"][module]) combined_straight_imports = \",", "config: Config = DEFAULT_CONFIG, extension: str = \"py\", import_type: str", "= output if output: imports_tail = output_at + len(output) while", "range(config.lines_after_imports) ] elif extension != \"pyi\" and next_construct.startswith(STATEMENT_DECLARATIONS): formatted_output[imports_tail:0] =", "\"\") if section_title and section_title not in seen_headings: if config.dedup_headings:", "copy import itertools from functools import partial from typing import", "_with_star_comments(parsed, module, list(comments or ())), f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix, ), parsed.line_separator,", "= pending_lines_before or not no_lines_before if config.ensure_newline_before_comments: output = _ensure_newline_before_comment(output)", "from_comments = [] for as_import in as_imports[from_import]: specific_comment = (", "only works for bare imports, 'as' imports not included if", "len(output) while [ character.strip() for character in formatted_output[imports_tail : imports_tail", "key=partial(sorting.section_key, config=config), reverse=config.reverse_sort, ) # uncollapse comments section_output = []", "_output_as_string(lines: List[str], line_separator: str) -> str: return line_separator.join(_normalize_empty_lines(lines)) def _normalize_empty_lines(lines:", "( f\"{use_comments and ';' or config.comment_prefix} \" f\"{comment}\" ) output.append(wrap.line(single_import_line,", "\" f\"{comment}\" ) if from_import in as_imports: if ( parsed.imports[section][\"from\"][module][from_import]", "not in config.single_line_exclusions) ) and not config.only_sections: 
from_imports = sorting.sort(", "parsed.place_imports: new_out_lines = [] for index, line in enumerate(formatted_output): new_out_lines.append(line)", "in straight_modules: if module in remove_imports: continue import_definition = []", "Config, from_modules: Iterable[str], section: str, remove_imports: List[str], import_type: str, )", "in config.no_lines_before if section_output: if section_name in parsed.place_imports: parsed.place_imports[section_name] =", "if \"*\" in from_imports and config.combine_star: import_statement = wrap.line( with_comments(", "\"_LineWithComments\": instance = super().__new__(cls, value) instance.comments = comments return instance", "= [f\"{module}.{from_import}\" for from_import in from_imports] as_imports = { from_import:", "base_sections: Tuple[str, ...] = () for section in sections: if", ".identify import STATEMENT_DECLARATIONS from .settings import DEFAULT_CONFIG, Config def sorted_imports(", "= None if \"*\" in from_imports and config.combine_star: import_statement =", "cls: Type[\"_LineWithComments\"], value: Any, comments: List[str] ) -> \"_LineWithComments\": instance", "branch section_output.insert(0, section_comment) if pending_lines_before or not no_lines_before: output +=", "Set, Tuple, Type from isort.format import format_simplified from . import", "specific_comment: from_comments.append(specific_comment) output.append( wrap.line( with_comments( from_comments, import_start + as_import, removed=config.ignore_comments,", "parsed.categorized_comments[\"nested\"] .get(module, {}) .pop(as_import, None) ) if specific_comment: from_comments.append(specific_comment) output.append(", "(\"FUTURE\",) continue parsed.imports[\"no_sections\"][\"straight\"].update( parsed.imports[section].get(\"straight\", {}) ) parsed.imports[\"no_sections\"][\"from\"].update(parsed.imports[section].get(\"from\", {})) sections =", "(\"no_sections\",) output: List[str] = [] seen_headings: Set[str] = set() pending_lines_before", "!= \"\" and not is_comment(prev_line): new_output.append(\"\") new_output.append(line) return new_output def", "NOT using GRID or VERTICAL wrap modes if ( len(import_statement)", "_output_as_string(parsed.lines_without_imports, parsed.line_separator) formatted_output: List[str] = parsed.lines_without_imports.copy() remove_imports = [format_simplified(removal) for", "List[str]: new_output: List[str] = [] def is_comment(line: Optional[str]) -> bool:", "';' or config.comment_prefix} \" f\"{comment}\" ) if from_import in as_imports:", "parsed.categorized_comments[\"straight\"].get( f\"{module}.{from_import}\" ) if not config.only_sections: output.extend( with_comments( from_comments, wrap.line(", "lines class _LineWithComments(str): comments: List[str] def __new__( cls: Type[\"_LineWithComments\"], value:", "as_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ), parsed.line_separator, config, ) ) from_comments =", "parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import, None) ) if comment: single_import_line += ( f\"{comments", "as with_comments from .identify import STATEMENT_DECLARATIONS from .settings import DEFAULT_CONFIG,", "in as_imports: from_import = from_imports.pop(0) if not config.only_sections: as_imports[from_import] =", "if module in remove_imports: continue import_start = f\"from {module} {import_type}", "= line break if config.lines_after_imports != -1: formatted_output[imports_tail:0] = [", "len(import_statement) > config.line_length and len(from_import_section) > 0 and config.multi_line_output 
not", "[ f\"{from_import} as {as_module}\" for as_module in parsed.as_map[\"from\"][sub_module] ] for", "if line else False for line, prev_line in zip(output, [None]", "= ( parsed.categorized_comments[\"straight\"].get(f\"{module}.{from_import}\") or [] ) if ( parsed.imports[section][\"from\"][module][from_import] and", "str = \"import\", ) -> str: \"\"\"Adds the imports back", "continue parsed.imports[\"no_sections\"][\"straight\"].update( parsed.imports[section].get(\"straight\", {}) ) parsed.imports[\"no_sections\"][\"from\"].update(parsed.imports[section].get(\"from\", {})) sections = base_sections", "module: str, comments: List[str]) -> List[str]: star_comment = parsed.categorized_comments[\"nested\"].get(module, {}).pop(\"*\",", "reverse=config.reverse_sort, ) # uncollapse comments section_output = [] for line", "wrap.Modes.GRID: # type: ignore other_import_statement = wrap.import_statement( import_start=import_start, from_imports=from_import_section, comments=comments,", "sub_modules) if sub_module in parsed.as_map[\"from\"] } if config.combine_as_imports and not", "config.only_sections: as_imports[from_import] = sorting.sort(config, as_imports[from_import]) from_comments = ( parsed.categorized_comments[\"straight\"].get(f\"{module}.{from_import}\") or", "tail = formatted_output[imports_tail:] for index, line in enumerate(tail): # pragma:", "'as' imports not included if config.combine_straight_imports and not as_imports: if", "output: imports_tail = output_at + len(output) while [ character.strip() for", "): specific_comment = ( parsed.categorized_comments[\"nested\"] .get(module, {}) .pop(from_import, None) )", "()) if comments: section_output.extend(comments) section_output.append(str(line)) section_name = section no_lines_before =", "other_import_statement = wrap.import_statement( import_start=import_start, from_imports=from_import_section, comments=comments, line_separator=parsed.line_separator, config=config, multi_line_output=wrap.Modes.VERTICAL_GRID, #", "parsed: parse.ParsedContent, config: Config = DEFAULT_CONFIG, extension: str = \"py\",", "and not config.only_sections: from_imports = sorting.sort( config, from_imports, key=lambda key:", "False for section in sections: straight_modules = parsed.imports[section][\"straight\"] if not", "from_comments = ( parsed.categorized_comments[\"straight\"].get(f\"{module}.{from_import}\") or [] ) if ( parsed.imports[section][\"from\"][module][from_import]", "lines_between + from_imports if config.force_sort_within_sections: # collapse comments comments_above =", "if not config.only_sections: output.extend( with_comments( from_comments, wrap.line( import_start + as_import,", "straight_import=True ), reverse=config.reverse_sort, ) from_modules = parsed.imports[section][\"from\"] if not config.only_sections:", "not straight_modules: return [] above_comments: List[str] = [] inline_comments: List[str]", "config, from_modules, section, remove_imports, import_type ) lines_between = [\"\"] *", "base_sections + (\"no_sections\",) output: List[str] = [] seen_headings: Set[str] =", "= sorting.sort( config, from_modules, key=lambda key: sorting.module_key(key, config, section_name=section), reverse=config.reverse_sort,", "removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for idef, imodule in import_definition ) return", ") and not config.only_sections: from_imports = sorting.sort( config, from_imports, key=lambda", "else: from_imports[idx : (idx + 1)] = as_imports.pop(from_import) only_show_as_imports =", 
"sorting.sort(config, as_imports[from_import]) ) else: output.extend( with_comments( from_comments, wrap.line( import_start +", "= wrap.line(import_statement, parsed.line_separator, config) if import_statement: output.append(import_statement) return output def", "str, ) -> List[str]: output: List[str] = [] for module", "sub_module in parsed.as_map[\"from\"] } if config.combine_as_imports and not (\"*\" in", "with_comments( _with_star_comments(parsed, module, []), f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) ) from_imports.remove(\"*\")", "section_comments=config.section_comments, needs_import=False, ) if not should_skip and line.strip(): if (", "True new_section_output = sorting.sort( config, new_section_output, key=partial(sorting.section_key, config=config), reverse=config.reverse_sort, )", "not do_multiline_reformat and len(import_statement) > config.line_length: import_statement = wrap.line(import_statement, parsed.line_separator,", "bare imports, 'as' imports not included if config.combine_straight_imports and not", "False force_grid_wrap = config.force_grid_wrap if force_grid_wrap and len(from_import_section) >= force_grid_wrap:", "else False for line, prev_line in zip(output, [None] + output):", "single_import_line += ( f\"{comments and ';' or config.comment_prefix} \" f\"{comment}\"", "= output_at + len(output) while [ character.strip() for character in", "config.from_first: section_output = from_imports + lines_between + straight_imports else: section_output", "())), f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix, ), parsed.line_separator, config, ) from_imports =", "return output def _with_straight_imports( parsed: parse.ParsedContent, config: Config, straight_modules: Iterable[str],", "config.only_sections: from_imports = sorting.sort( config, from_imports, key=lambda key: sorting.module_key( key,", "index, line in enumerate(tail): # pragma: no branch should_skip, in_quote,", "no branch should_skip, in_quote, *_ = parse.skip_line( line, in_quote=\"\", index=len(formatted_output),", "star_comment = parsed.categorized_comments[\"nested\"].get(module, {}).pop(\"*\", None) if star_comment: return comments +", "from_imports: from_import = from_imports.pop(0) single_import_line = with_comments( comments, import_start +", "GRID or VERTICAL wrap modes if ( len(import_statement) > config.line_length", "from_import = from_imports.pop(0) if not config.only_sections: as_imports[from_import] = sorting.sort(config, as_imports[from_import])", "straight_imports else: section_output = straight_imports + lines_between + from_imports if", "if comment: single_import_line += ( f\"{comments and ';' or config.comment_prefix}", "= config.force_grid_wrap if force_grid_wrap and len(from_import_section) >= force_grid_wrap: do_multiline_reformat =", "parse.ParsedContent, config: Config, from_modules: Iterable[str], section: str, remove_imports: List[str], import_type:", "+ lines_between + from_imports if config.force_sort_within_sections: # collapse comments comments_above", "[] for module in straight_modules: if module in parsed.categorized_comments[\"above\"][\"straight\"]: above_comments.extend(parsed.categorized_comments[\"above\"][\"straight\"].pop(module))", "in parsed.as_map[\"straight\"] for module in straight_modules)) # combine_straight_imports only works", "in formatted_output[imports_tail : imports_tail + 1] ] == [\"\"]: formatted_output.pop(imports_tail)", "options is not imposed if 
force_sort_within_sections is True new_section_output =", "in new_section_output: comments = getattr(line, \"comments\", ()) if comments: section_output.extend(comments)", "] == [\"\"]: formatted_output.pop(imports_tail) if len(formatted_output) > imports_tail: next_construct =", "parsed.original_line_count: output_at = parsed.import_index formatted_output[output_at:0] = output if output: imports_tail", "\".join(straight_modules) if inline_comments: combined_inline_comments = \" \".join(inline_comments) else: combined_inline_comments =", "config.forced_separate) if config.no_sections: parsed.imports[\"no_sections\"] = {\"straight\": {}, \"from\": {}} base_sections:", "straight_modules else 0 ) if config.from_first: section_output = from_imports +", "parsed: parse.ParsedContent, config: Config, from_modules: Iterable[str], section: str, remove_imports: List[str],", "instance = super().__new__(cls, value) instance.comments = comments return instance def", ") from_modules = parsed.imports[section][\"from\"] if not config.only_sections: from_modules = sorting.sort(", "[\"\"]: formatted_output.pop(imports_tail) if len(formatted_output) > imports_tail: next_construct = \"\" tail", "combined_inline_comments: output.append( f\"{import_type} {combined_straight_imports} # {combined_inline_comments}\" ) else: output.append(f\"{import_type} {combined_straight_imports}\")", "not config.only_sections: from_imports = sorting.sort( config, from_imports, key=lambda key: sorting.module_key(", "in from_imports and config.combine_star): if not config.no_inline_sort: for as_import in", "-1: return _output_as_string(parsed.lines_without_imports, parsed.line_separator) formatted_output: List[str] = parsed.lines_without_imports.copy() remove_imports =", "section_output = straight_imports + lines_between + from_imports if config.force_sort_within_sections: #", "1) and tail[index + 1].strip() ): continue next_construct = line", ") > config.line_length ): import_statement = other_import_statement if not do_multiline_reformat", "(index + 1) and tail[index + 1].strip() ): continue next_construct", "module in remove_imports: continue import_start = f\"from {module} {import_type} \"", "elif comments_above: new_section_output.append(_LineWithComments(line, comments_above)) comments_above = [] else: new_section_output.append(line) #", "section_output = from_imports + lines_between + straight_imports else: section_output =", "other_import_statement if not do_multiline_reformat and len(import_statement) > config.line_length: import_statement =", "single_import_line = with_comments( use_comments, import_start + from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, )", "continue next_construct = line break if in_quote: # pragma: no", "from_modules: Iterable[str], section: str, remove_imports: List[str], import_type: str, ) ->", "line.strip(): if ( line.strip().startswith(\"#\") and len(tail) > (index + 1)", "inline_comments.extend(parsed.categorized_comments[\"straight\"][module]) combined_straight_imports = \", \".join(straight_modules) if inline_comments: combined_inline_comments = \"", "parsed.place_imports: parsed.place_imports[section_name] = section_output continue section_title = config.import_headings.get(section_name.lower(), \"\") if", "comments = None else: while from_imports and from_imports[0] in as_imports:", "f\"# {section_title}\" if section_comment not in parsed.lines_without_imports[0:1]: # pragma: no", "section_output: if not line: continue if line.startswith(\"#\"): 
comments_above.append(line) elif comments_above:", "pending_lines_before = pending_lines_before or not no_lines_before if config.ensure_newline_before_comments: output =", "ignore ): do_multiline_reformat = True if do_multiline_reformat: import_statement = wrap.import_statement(", "extension: str = \"py\", import_type: str = \"import\", ) ->", "output = _ensure_newline_before_comment(output) while output and output[-1].strip() == \"\": output.pop()", ">= force_grid_wrap: do_multiline_reformat = True if len(import_statement) > config.line_length and", "formatted_output[imports_tail : imports_tail + 1] ] == [\"\"]: formatted_output.pop(imports_tail) if", "if config.dedup_headings: seen_headings.add(section_title) section_comment = f\"# {section_title}\" if section_comment not", "star_modules + other_modules straight_imports = _with_straight_imports( parsed, config, straight_modules, section,", "from_import in as_imports: if ( parsed.imports[section][\"from\"][module][from_import] and not only_show_as_imports ):", "parsed.as_map[\"from\"][sub_module] ] for from_import, sub_module in zip(from_imports, sub_modules) if sub_module", "else: output.extend( with_comments( from_comments, wrap.line( import_start + as_import, parsed.line_separator, config", "zip(output, [None] + output): # type: ignore if is_comment(line) and", "line in parsed.import_placements: new_out_lines.extend(parsed.place_imports[parsed.import_placements[line]]) if ( len(formatted_output) <= (index +", "parsed.categorized_comments[\"from\"].pop(f\"{module}.__combined_as__\", ()) ) import_statement = with_comments( comments, import_start + (\",", "multi_line_output=wrap.Modes.VERTICAL_GRID, # type: ignore ) if ( max( len(import_line) for", "f\"{from_import} as {as_module}\" for as_module in parsed.as_map[\"from\"][sub_module] ] for from_import,", "should_skip and line.strip(): if ( line.strip().startswith(\"#\") and len(tail) > (index", "type: ignore ) if ( max( len(import_line) for import_line in", "from isort.format import format_simplified from . 
import parse, sorting, wrap", "if do_multiline_reformat: import_statement = wrap.import_statement( import_start=import_start, from_imports=from_import_section, comments=comments, line_separator=parsed.line_separator, config=config,", "= False else: pending_lines_before = pending_lines_before or not no_lines_before if", "config.only_sections: output.extend( with_comments( from_comments, wrap.line( import_start + as_import, parsed.line_separator, config", "None) ) if comment: from_imports.remove(from_import) if from_imports: use_comments = []", "itertools.chain(parsed.sections, config.forced_separate) if config.no_sections: parsed.imports[\"no_sections\"] = {\"straight\": {}, \"from\": {}}", "= \"\" output.extend(above_comments) if combined_inline_comments: output.append( f\"{import_type} {combined_straight_imports} # {combined_inline_comments}\"", "Any, comments: List[str] ) -> \"_LineWithComments\": instance = super().__new__(cls, value)", "not no_lines_before: output += [\"\"] * config.lines_between_sections output += section_output", "== [\"\"]: formatted_output.pop(imports_tail) if len(formatted_output) > imports_tail: next_construct = \"\"", "module not in config.single_line_exclusions: import_statement = \"\" while from_imports: from_import", "output.extend(above_comments) if combined_inline_comments: output.append( f\"{import_type} {combined_straight_imports} # {combined_inline_comments}\" ) else:", "[\"\"] if parsed.place_imports: new_out_lines = [] for index, line in", "def _with_star_comments(parsed: parse.ParsedContent, module: str, comments: List[str]) -> List[str]: star_comment", "have imports AND we are # NOT using GRID or", "for section in sections: if section == \"FUTURE\": base_sections =", "remove_imports: List[str], import_type: str, ) -> List[str]: output: List[str] =", "List[str] = [] as_imports = any((module in parsed.as_map[\"straight\"] for module", "if ( len(formatted_output) <= (index + 1) or formatted_output[index +", "from_modules and straight_modules else 0 ) if config.from_first: section_output =", "in parsed.import_placements: new_out_lines.extend(parsed.place_imports[parsed.import_placements[line]]) if ( len(formatted_output) <= (index + 1)", "STATEMENT_DECLARATIONS from .settings import DEFAULT_CONFIG, Config def sorted_imports( parsed: parse.ParsedContent,", "lines_between = [\"\"] * ( config.lines_between_types if from_modules and straight_modules", "{module} as {as_import}\", f\"{module} as {as_import}\") for as_import in parsed.as_map[\"straight\"][module]", "base_sections = (\"FUTURE\",) continue parsed.imports[\"no_sections\"][\"straight\"].update( parsed.imports[section].get(\"straight\", {}) ) parsed.imports[\"no_sections\"][\"from\"].update(parsed.imports[section].get(\"from\", {}))", "(idx + 1)] = as_imports.pop(from_import) else: from_imports[idx : (idx +", "def sorted_imports( parsed: parse.ParsedContent, config: Config = DEFAULT_CONFIG, extension: str", "config.combine_as_imports and not (\"*\" in from_imports and config.combine_star): if not", "and ';' or config.comment_prefix} \" f\"{comment}\" ) if from_import in", "# type: ignore other_import_statement = wrap.import_statement( import_start=import_start, from_imports=from_import_section, comments=comments, line_separator=parsed.line_separator,", "> 0 and config.multi_line_output not in (wrap.Modes.GRID, wrap.Modes.VERTICAL) # type:", "-1: formatted_output[imports_tail:0] = [ \"\" for line in range(config.lines_after_imports) ]", "pragma: no branch next_construct = line break if 
config.lines_after_imports !=", "instance.comments = comments return instance def _ensure_newline_before_comment(output: List[str]) -> List[str]:", ".get(module, {}) .pop(from_import, None) ) if specific_comment: from_comments.append(specific_comment) output.append( wrap.line(", "sub_module in zip(from_imports, sub_modules) if sub_module in parsed.as_map[\"from\"] } if", "+= ( f\"{use_comments and ';' or config.comment_prefix} \" f\"{comment}\" )", "= getattr(line, \"comments\", ()) if comments: section_output.extend(comments) section_output.append(str(line)) section_name =", "prev_line in zip(output, [None] + output): # type: ignore if", "= False for section in sections: straight_modules = parsed.imports[section][\"straight\"] if", "= sorting.sort(config, as_imports[from_import]) from_comments = ( parsed.categorized_comments[\"straight\"].get(f\"{module}.{from_import}\") or [] )", "wrap.line( import_start + as_import, parsed.line_separator, config ), removed=config.ignore_comments, comment_prefix=config.comment_prefix, )", "as_imports: if ( parsed.imports[section][\"from\"][module][from_import] and not only_show_as_imports ): output.append( wrap.line(single_import_line,", "Type from isort.format import format_simplified from . import parse, sorting,", "star_modules.append(module) else: other_modules.append(module) from_modules = star_modules + other_modules straight_imports =", "elif extension != \"pyi\" and next_construct.startswith(STATEMENT_DECLARATIONS): formatted_output[imports_tail:0] = [\"\", \"\"]", "in parsed.place_imports: parsed.place_imports[section_name] = section_output continue section_title = config.import_headings.get(section_name.lower(), \"\")", "[None] + output): # type: ignore if is_comment(line) and prev_line", "not in remove_imports ] sub_modules = [f\"{module}.{from_import}\" for from_import in", "with_comments( _with_star_comments(parsed, module, list(comments or ())), f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix, ),", "in_quote=\"\", index=len(formatted_output), section_comments=config.section_comments, needs_import=False, ) if not should_skip and line.strip():", "for removal in config.remove_imports] sections: Iterable[str] = itertools.chain(parsed.sections, config.forced_separate) if", "if comments: section_output.extend(comments) section_output.append(str(line)) section_name = section no_lines_before = section_name", "output.extend(comments_above) output.extend( with_comments( parsed.categorized_comments[\"straight\"].get(imodule), idef, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for idef,", "{})) sections = base_sections + (\"no_sections\",) output: List[str] = []", "(at the index of the first import) sorted alphabetically and", "in from_imports] as_imports = { from_import: [ f\"{from_import} as {as_module}\"", "in sections: straight_modules = parsed.imports[section][\"straight\"] if not config.only_sections: straight_modules =", "{import_type} \" from_imports = list(parsed.imports[section][\"from\"][module]) if ( not config.no_inline_sort or", "section == \"FUTURE\": base_sections = (\"FUTURE\",) continue parsed.imports[\"no_sections\"][\"straight\"].update( parsed.imports[section].get(\"straight\", {})", "in enumerate(formatted_output): new_out_lines.append(line) if line in parsed.import_placements: new_out_lines.extend(parsed.place_imports[parsed.import_placements[line]]) if (", "= parsed.categorized_comments[\"from\"].pop(module, ()) above_comments = 
parsed.categorized_comments[\"above\"][\"from\"].pop(module, None) while from_imports: if", "from_import_section = [] while from_imports and ( from_imports[0] not in", "as_imports: if not config.only_sections: as_imports[as_import] = sorting.sort(config, as_imports[as_import]) for from_import", ") if not from_import_section: import_statement = \"\" do_multiline_reformat = False", "in remove_imports: continue import_definition = [] if module in parsed.as_map[\"straight\"]:", "= [ from_import for from_import in from_imports if from_import in", "from_imports and from_imports[0] in as_imports: from_import = from_imports.pop(0) if not", "> config.line_length: import_statement = wrap.line(import_statement, parsed.line_separator, config) if import_statement: output.append(import_statement)", "do_multiline_reformat: import_statement = wrap.import_statement( import_start=import_start, from_imports=from_import_section, comments=comments, line_separator=parsed.line_separator, config=config, )", "from_imports: if above_comments: output.extend(above_comments) above_comments = None if \"*\" in", "output and output[-1].strip() == \"\": output.pop() # pragma: no cover", "config.formatting_function( parsed.line_separator.join(output), extension, config ).splitlines() output_at = 0 if parsed.import_index", "config.multi_line_output not in (wrap.Modes.GRID, wrap.Modes.VERTICAL) # type: ignore ): do_multiline_reformat", "import_definition.append((f\"{import_type} {module}\", module)) import_definition.extend( (f\"{import_type} {module} as {as_import}\", f\"{module} as", "removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) single_import_line += ( f\"{use_comments and ';' or", "is_comment(prev_line): new_output.append(\"\") new_output.append(line) return new_output def _with_star_comments(parsed: parse.ParsedContent, module: str,", "[] other_modules = [] for module in from_modules: if \"*\"", "from_modules = sorting.sort( config, from_modules, key=lambda key: sorting.module_key(key, config, section_name=section),", "if not should_skip and line.strip(): if ( line.strip().startswith(\"#\") and len(tail)", "import_start=import_start, from_imports=from_import_section, comments=comments, line_separator=parsed.line_separator, config=config, ) if config.multi_line_output == wrap.Modes.GRID:", "ignore ) if ( max( len(import_line) for import_line in import_statement.split(parsed.line_separator)", "-> \"_LineWithComments\": instance = super().__new__(cls, value) instance.comments = comments return", "comment: single_import_line += ( f\"{comments and ';' or config.comment_prefix} \"", "[] def is_comment(line: Optional[str]) -> bool: return line.startswith(\"#\") if line", "or VERTICAL wrap modes if ( len(import_statement) > config.line_length and", "} if config.combine_as_imports and not (\"*\" in from_imports and config.combine_star):", "from_imports[(idx + 1) : (idx + 1)] = as_imports.pop(from_import) else:", "[] inline_comments: List[str] = [] for module in straight_modules: if", "[\"\", \"\"] else: formatted_output[imports_tail:0] = [\"\"] if parsed.place_imports: new_out_lines =", "index, line in enumerate(formatted_output): new_out_lines.append(line) if line in parsed.import_placements: new_out_lines.extend(parsed.place_imports[parsed.import_placements[line]])", "\"\"] else: formatted_output[imports_tail:0] = [\"\"] if parsed.place_imports: new_out_lines = []", "= [] def is_comment(line: Optional[str]) -> bool: return line.startswith(\"#\") if", "output_at = parsed.import_index 
formatted_output[output_at:0] = output if output: imports_tail =", "f\"{module} as {as_import}\") for as_import in parsed.as_map[\"straight\"][module] ) else: import_definition.append((f\"{import_type}", "from_imports=from_import_section, comments=comments, line_separator=parsed.line_separator, config=config, multi_line_output=wrap.Modes.VERTICAL_GRID, # type: ignore ) if", "1) : (idx + 1)] = as_imports.pop(from_import) else: from_imports[idx :", "else: pending_lines_before = pending_lines_before or not no_lines_before if config.ensure_newline_before_comments: output", "wrap modes if ( len(import_statement) > config.line_length and len(from_import_section) >", "config.ensure_newline_before_comments: output = _ensure_newline_before_comment(output) while output and output[-1].strip() == \"\":", "in (wrap.Modes.GRID, wrap.Modes.VERTICAL) # type: ignore ): do_multiline_reformat = True", "key=lambda key: sorting.module_key(key, config, section_name=section), reverse=config.reverse_sort, ) if config.star_first: star_modules", "from_import in copy.copy(from_imports): comment = ( parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import, None) )", "line break if config.lines_after_imports != -1: formatted_output[imports_tail:0] = [ \"\"", "formatted_output.pop(imports_tail) if len(formatted_output) > imports_tail: next_construct = \"\" tail =", "output and output[0].strip() == \"\": output.pop(0) if config.formatting_function: output =", "parsed.lines_without_imports[0:1]: # pragma: no branch section_output.insert(0, section_comment) if pending_lines_before or", "as_imports or ( config.combine_as_imports and parsed.imports[section][\"from\"][module][from_import] ) ): from_import_section.append(from_imports.pop(0)) if", "if from_import in as_imports ] only_show_as_imports = True elif config.force_single_line", "from_imports and config.combine_star): if not config.no_inline_sort: for as_import in as_imports:", "if f\"{module}.{line}\" not in remove_imports ] sub_modules = [f\"{module}.{from_import}\" for", "the imports back to the file. (at the index of", "[] for index, line in enumerate(formatted_output): new_out_lines.append(line) if line in", ") if comment: from_imports.remove(from_import) if from_imports: use_comments = [] else:", "split between groups \"\"\" if parsed.import_index == -1: return _output_as_string(parsed.lines_without_imports,", "not in (wrap.Modes.GRID, wrap.Modes.VERTICAL) # type: ignore ): do_multiline_reformat =", "comment_prefix=config.comment_prefix, ) comment = ( parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import, None) ) if", "+ other_modules straight_imports = _with_straight_imports( parsed, config, straight_modules, section, remove_imports,", "\"\" do_multiline_reformat = False force_grid_wrap = config.force_grid_wrap if force_grid_wrap and", "or config.comment_prefix} \" f\"{comment}\" ) if from_import in as_imports: if", "# type: ignore ): do_multiline_reformat = True if do_multiline_reformat: import_statement", "Tuple[str, ...] = () for section in sections: if section", "return lines class _LineWithComments(str): comments: List[str] def __new__( cls: Type[\"_LineWithComments\"],", "== \"\": output.pop() # pragma: no cover while output and", "{}, \"from\": {}} base_sections: Tuple[str, ...] 
= () for section", "imodule in import_definition ) return output def _output_as_string(lines: List[str], line_separator:", "and len(from_import_section) > 0 and config.multi_line_output not in (wrap.Modes.GRID, wrap.Modes.VERTICAL)", "AND we are # NOT using GRID or VERTICAL wrap", "inline_comments: combined_inline_comments = \" \".join(inline_comments) else: combined_inline_comments = \"\" output.extend(above_comments)", "if not line: continue if line.startswith(\"#\"): comments_above.append(line) elif comments_above: new_section_output.append(_LineWithComments(line,", "in parsed.lines_without_imports[0:1]: # pragma: no branch section_output.insert(0, section_comment) if pending_lines_before", "combined_inline_comments = \"\" output.extend(above_comments) if combined_inline_comments: output.append( f\"{import_type} {combined_straight_imports} #", "for from_import in from_imports if from_import in as_imports ] only_show_as_imports", "while from_imports and from_imports[0] in as_imports: from_import = from_imports.pop(0) if", "List[str]: output: List[str] = [] for module in from_modules: if", "_with_from_imports( parsed, config, from_modules, section, remove_imports, import_type ) lines_between =", "List[str] = [] for line in section_output: if not line:", "comments=comments, line_separator=parsed.line_separator, config=config, ) if config.multi_line_output == wrap.Modes.GRID: # type:", "section_comment) if pending_lines_before or not no_lines_before: output += [\"\"] *", "True # If line too long AND have imports AND", "!= -1: formatted_output[imports_tail:0] = [ \"\" for line in range(config.lines_after_imports)", "parsed.line_separator, config, ) from_imports = [ from_import for from_import in", "specific_comment = ( parsed.categorized_comments[\"nested\"] .get(module, {}) .pop(from_import, None) ) if", "parse.ParsedContent, config: Config, straight_modules: Iterable[str], section: str, remove_imports: List[str], import_type:", "f\"from {module} {import_type} \" from_imports = list(parsed.imports[section][\"from\"][module]) if ( not", "idef, imodule in import_definition ) return output def _output_as_string(lines: List[str],", "if ( parsed.imports[section][\"from\"][module][from_import] and not only_show_as_imports ): specific_comment = (", "not config.only_sections: straight_modules = sorting.sort( config, straight_modules, key=lambda key: sorting.module_key(", "> config.line_length ): import_statement = other_import_statement if not do_multiline_reformat and", "module in straight_modules: if module in remove_imports: continue import_definition =", "typing import Any, Iterable, List, Optional, Set, Tuple, Type from", "sections = base_sections + (\"no_sections\",) output: List[str] = [] seen_headings:", "), reverse=config.reverse_sort, ) if remove_imports: from_imports = [ line for", "import format_simplified from . 
import parse, sorting, wrap from .comments", ") if remove_imports: from_imports = [ line for line in", "comment: from_imports.remove(from_import) if from_imports: use_comments = [] else: use_comments =", "if above_comments: output.extend(above_comments) above_comments = None if \"*\" in from_imports", ") from_imports.remove(\"*\") for from_import in copy.copy(from_imports): comment = ( parsed.categorized_comments[\"nested\"].get(module,", "else: combined_inline_comments = \"\" output.extend(above_comments) if combined_inline_comments: output.append( f\"{import_type} {combined_straight_imports}", "+ 1] ] == [\"\"]: formatted_output.pop(imports_tail) if len(formatted_output) > imports_tail:", ") single_import_line += ( f\"{use_comments and ';' or config.comment_prefix} \"", "needs_import=False, ) if not should_skip and line.strip(): if ( line.strip().startswith(\"#\")", "parsed.imports[\"no_sections\"][\"from\"].update(parsed.imports[section].get(\"from\", {})) sections = base_sections + (\"no_sections\",) output: List[str] =", "= f\"# {section_title}\" if section_comment not in parsed.lines_without_imports[0:1]: # pragma:", "+ as_import, parsed.line_separator, config ), removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for as_import", "+ as_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ), parsed.line_separator, config, ) ) from_comments", "* ( config.lines_between_types if from_modules and straight_modules else 0 )", "lines and lines[-1].strip() == \"\": lines.pop(-1) lines.append(\"\") return lines class", "wrap.Modes.VERTICAL) # type: ignore ): do_multiline_reformat = True if do_multiline_reformat:", "> config.line_length and len(from_import_section) > 0 and config.multi_line_output not in", "not config.no_inline_sort or (config.force_single_line and module not in config.single_line_exclusions) )", "), reverse=config.reverse_sort, ) from_modules = parsed.imports[section][\"from\"] if not config.only_sections: from_modules", ".comments import add_to_line as with_comments from .identify import STATEMENT_DECLARATIONS from", "= parsed.imports[section][\"straight\"] if not config.only_sections: straight_modules = sorting.sort( config, straight_modules,", ") if config.from_first: section_output = from_imports + lines_between + straight_imports", "from .settings import DEFAULT_CONFIG, Config def sorted_imports( parsed: parse.ParsedContent, config:", "def _with_straight_imports( parsed: parse.ParsedContent, config: Config, straight_modules: Iterable[str], section: str,", "( f\"{comments and ';' or config.comment_prefix} \" f\"{comment}\" ) if", "config.formatting_function: output = config.formatting_function( parsed.line_separator.join(output), extension, config ).splitlines() output_at =", "in parsed.as_map[\"straight\"]: if parsed.imports[section][\"straight\"][module]: import_definition.append((f\"{import_type} {module}\", module)) import_definition.extend( (f\"{import_type} {module}", "section_output.extend(comments) section_output.append(str(line)) section_name = section no_lines_before = section_name in config.no_lines_before", "import STATEMENT_DECLARATIONS from .settings import DEFAULT_CONFIG, Config def sorted_imports( parsed:", "[]) + list( parsed.categorized_comments[\"from\"].pop(f\"{module}.__combined_as__\", ()) ) import_statement = with_comments( comments,", "in copy.copy(from_imports): if from_import in as_imports: idx = from_imports.index(from_import) if", "if import_statement: output.append(import_statement) return output def 
_with_straight_imports( parsed: parse.ParsedContent, config:", "List[str] ) -> \"_LineWithComments\": instance = super().__new__(cls, value) instance.comments =", "), parsed.line_separator, config, ) from_imports = [ from_import for from_import", "else: use_comments = comments comments = None single_import_line = with_comments(", "import_statement = with_comments( comments, import_start + (\", \").join(from_import_section), removed=config.ignore_comments, comment_prefix=config.comment_prefix,", "above_comments: List[str] = [] inline_comments: List[str] = [] for module", "no branch section_output.insert(0, section_comment) if pending_lines_before or not no_lines_before: output", "break if config.lines_after_imports != -1: formatted_output[imports_tail:0] = [ \"\" for", "not config.only_sections: output.extend( with_comments( from_comments, wrap.line( import_start + as_import, parsed.line_separator,", "above_comments: output.extend(above_comments) above_comments = None if \"*\" in from_imports and", "for section in sections: straight_modules = parsed.imports[section][\"straight\"] if not config.only_sections:", "imports back to the file. (at the index of the", "{module} {import_type} \" from_imports = list(parsed.imports[section][\"from\"][module]) if ( not config.no_inline_sort", "config.remove_imports] sections: Iterable[str] = itertools.chain(parsed.sections, config.forced_separate) if config.no_sections: parsed.imports[\"no_sections\"] =", "DEFAULT_CONFIG, Config def sorted_imports( parsed: parse.ParsedContent, config: Config = DEFAULT_CONFIG,", "parsed.line_separator, config ), removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for as_import in sorting.sort(config,", "else: output.append(f\"{import_type} {combined_straight_imports}\") return output for module in straight_modules: if", "with_comments( from_comments, wrap.line( import_start + as_import, parsed.line_separator, config ), removed=config.ignore_comments,", "parse, sorting, wrap from .comments import add_to_line as with_comments from", "section: str, remove_imports: List[str], import_type: str, ) -> List[str]: output:", "max( len(import_line) for import_line in import_statement.split(parsed.line_separator) ) > config.line_length ):", "{}).pop(from_import, None) ) if comment: single_import_line += ( f\"{comments and", "if \"*\" in from_imports: output.append( with_comments( _with_star_comments(parsed, module, []), f\"{import_start}*\",", "any((module in parsed.as_map[\"straight\"] for module in straight_modules)) # combine_straight_imports only", "!= \"\" ): new_out_lines.append(\"\") formatted_output = new_out_lines return _output_as_string(formatted_output, parsed.line_separator)", "comment_prefix=config.comment_prefix, ), parsed.line_separator, config, ) ) from_comments = [] for", "we are # NOT using GRID or VERTICAL wrap modes", "> (index + 1) and tail[index + 1].strip() ): continue", "< parsed.original_line_count: output_at = parsed.import_index formatted_output[output_at:0] = output if output:", "if not straight_modules: return [] above_comments: List[str] = [] inline_comments:", "= with_comments( use_comments, import_start + from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) single_import_line", "[] ) if ( parsed.imports[section][\"from\"][module][from_import] and not only_show_as_imports ): specific_comment", "comments_above = [] else: new_section_output.append(line) # only_sections options is not", "in sections: if section == \"FUTURE\": base_sections = 
(\"FUTURE\",) continue", "config.line_length and len(from_import_section) > 1: do_multiline_reformat = True # If", "f\"{use_comments and ';' or config.comment_prefix} \" f\"{comment}\" ) output.append(wrap.line(single_import_line, parsed.line_separator,", "import_definition = [] if module in parsed.as_map[\"straight\"]: if parsed.imports[section][\"straight\"][module]: import_definition.append((f\"{import_type}", "section_title and section_title not in seen_headings: if config.dedup_headings: seen_headings.add(section_title) section_comment", "not should_skip and line.strip(): if ( line.strip().startswith(\"#\") and len(tail) >", "(idx + 1)] = as_imports.pop(from_import) only_show_as_imports = False comments =", "of the first import) sorted alphabetically and split between groups", "= from_imports.pop(0) if not config.only_sections: as_imports[from_import] = sorting.sort(config, as_imports[from_import]) from_comments", "parsed.imports[\"no_sections\"] = {\"straight\": {}, \"from\": {}} base_sections: Tuple[str, ...] =", "= \", \".join(straight_modules) if inline_comments: combined_inline_comments = \" \".join(inline_comments) else:", "= wrap.line( with_comments( _with_star_comments(parsed, module, list(comments or ())), f\"{import_start}*\", removed=config.ignore_comments,", "or ())), f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix, ), parsed.line_separator, config, ) from_imports", "parsed.line_separator, config)) from_import_section = [] while from_imports and ( from_imports[0]", "parsed.line_separator, config) ) from_comments = parsed.categorized_comments[\"straight\"].get( f\"{module}.{from_import}\" ) if not", "wrap.line(import_statement, parsed.line_separator, config) if import_statement: output.append(import_statement) return output def _with_straight_imports(", "] only_show_as_imports = True elif config.force_single_line and module not in", "for as_import in sorting.sort(config, as_imports[from_import]) ) else: output.extend( with_comments( from_comments,", "comment = ( parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import, None) ) if comment: from_imports.remove(from_import)", "from_imports: output.append( with_comments( _with_star_comments(parsed, module, []), f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix, )", "parsed.imports[\"no_sections\"][\"straight\"].update( parsed.imports[section].get(\"straight\", {}) ) parsed.imports[\"no_sections\"][\"from\"].update(parsed.imports[section].get(\"from\", {})) sections = base_sections +", "def _with_from_imports( parsed: parse.ParsedContent, config: Config, from_modules: Iterable[str], section: str,", "included if config.combine_straight_imports and not as_imports: if not straight_modules: return", "= [] for line in section_output: if not line: continue", "section_comment not in parsed.lines_without_imports[0:1]: # pragma: no branch section_output.insert(0, section_comment)", "parsed.imports[section][\"straight\"][module]: import_definition.append((f\"{import_type} {module}\", module)) import_definition.extend( (f\"{import_type} {module} as {as_import}\", f\"{module}", "> config.line_length and len(from_import_section) > 1: do_multiline_reformat = True #", "= comments comments = None single_import_line = with_comments( use_comments, import_start", "above_comments = None if \"*\" in from_imports and config.combine_star: import_statement", "above_comments.extend(parsed.categorized_comments[\"above\"][\"straight\"].pop(module)) if module 
in parsed.categorized_comments[\"straight\"]: inline_comments.extend(parsed.categorized_comments[\"straight\"][module]) combined_straight_imports = \", \".join(straight_modules)", "1].strip() != \"\" ): new_out_lines.append(\"\") formatted_output = new_out_lines return _output_as_string(formatted_output,", "= any((module in parsed.as_map[\"straight\"] for module in straight_modules)) # combine_straight_imports", "continue section_title = config.import_headings.get(section_name.lower(), \"\") if section_title and section_title not", "in straight_modules: if module in parsed.categorized_comments[\"above\"][\"straight\"]: above_comments.extend(parsed.categorized_comments[\"above\"][\"straight\"].pop(module)) if module in", "from_imports: use_comments = [] else: use_comments = comments comments =", "from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) comment = ( parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import, None)", "in parsed.categorized_comments[\"above\"][\"straight\"]: above_comments.extend(parsed.categorized_comments[\"above\"][\"straight\"].pop(module)) if module in parsed.categorized_comments[\"straight\"]: inline_comments.extend(parsed.categorized_comments[\"straight\"][module]) combined_straight_imports =", "else 0 ) if config.from_first: section_output = from_imports + lines_between", "= parsed.categorized_comments[\"nested\"].get(module, {}).pop(\"*\", None) if star_comment: return comments + [star_comment]", "*_ = parse.skip_line( line, in_quote=\"\", index=len(formatted_output), section_comments=config.section_comments, needs_import=False, ) if", "if config.from_first: section_output = from_imports + lines_between + straight_imports else:", "= [] else: use_comments = comments comments = None single_import_line", "import_start + (\", \").join(from_import_section), removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) if not from_import_section:", "formatted_output[imports_tail:0] = [\"\", \"\"] else: formatted_output[imports_tail:0] = [\"\"] if parsed.place_imports:", "List[str], import_type: str, ) -> List[str]: output: List[str] = []", "wrap.line( with_comments( _with_star_comments(parsed, module, list(comments or ())), f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix,", "if comments_above: output.extend(comments_above) output.extend( with_comments( parsed.categorized_comments[\"straight\"].get(imodule), idef, removed=config.ignore_comments, comment_prefix=config.comment_prefix, )", "or ( config.combine_as_imports and parsed.imports[section][\"from\"][module][from_import] ) ): from_import_section.append(from_imports.pop(0)) if config.combine_as_imports:", "and module not in config.single_line_exclusions: import_statement = \"\" while from_imports:", "), removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for as_import in as_imports[from_import] ) else:", "module in from_modules: if \"*\" in parsed.imports[section][\"from\"][module]: star_modules.append(module) else: other_modules.append(module)", "if section_output: if section_name in parsed.place_imports: parsed.place_imports[section_name] = section_output continue", "\"\": output.pop() # pragma: no cover while output and output[0].strip()", "from .comments import add_to_line as with_comments from .identify import STATEMENT_DECLARATIONS", "remove_imports: from_imports = [ line for line in from_imports if", ") if not should_skip and line.strip(): if ( line.strip().startswith(\"#\") and", "in 
remove_imports ] sub_modules = [f\"{module}.{from_import}\" for from_import in from_imports]", "module)) import_definition.extend( (f\"{import_type} {module} as {as_import}\", f\"{module} as {as_import}\") for", "from_comments = [] if \"*\" in from_imports: output.append( with_comments( _with_star_comments(parsed,", "_with_straight_imports( parsed, config, straight_modules, section, remove_imports, import_type ) from_imports =", "in config.single_line_exclusions: import_statement = \"\" while from_imports: from_import = from_imports.pop(0)", ") from_imports = _with_from_imports( parsed, config, from_modules, section, remove_imports, import_type", "[] if \"*\" in from_imports: output.append( with_comments( _with_star_comments(parsed, module, []),", "not as_imports: if not straight_modules: return [] above_comments: List[str] =", "if from_import in as_imports: idx = from_imports.index(from_import) if parsed.imports[section][\"from\"][module][from_import]: from_imports[(idx", "parsed.imports[section][\"from\"][module][from_import] and not only_show_as_imports ): output.append( wrap.line(single_import_line, parsed.line_separator, config) )", "List[str] = [] for module in from_modules: if module in", "if len(import_statement) > config.line_length and len(from_import_section) > 1: do_multiline_reformat =", "import_start + as_import, parsed.line_separator, config ), removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for", "False comments = parsed.categorized_comments[\"from\"].pop(module, ()) above_comments = parsed.categorized_comments[\"above\"][\"from\"].pop(module, None) while", "if not config.only_sections: as_imports[from_import] = sorting.sort(config, as_imports[from_import]) from_comments = (", "if specific_comment: from_comments.append(specific_comment) output.append( wrap.line( with_comments( from_comments, import_start + from_import,", "\", \".join(straight_modules) if inline_comments: combined_inline_comments = \" \".join(inline_comments) else: combined_inline_comments", "lines.append(\"\") return lines class _LineWithComments(str): comments: List[str] def __new__( cls:", "[]), f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) ) from_imports.remove(\"*\") for from_import in", "return new_output def _with_star_comments(parsed: parse.ParsedContent, module: str, comments: List[str]) ->", "f\"{import_type} {combined_straight_imports} # {combined_inline_comments}\" ) else: output.append(f\"{import_type} {combined_straight_imports}\") return output", "{}} base_sections: Tuple[str, ...] 
= () for section in sections:", "config.lines_between_sections output += section_output pending_lines_before = False else: pending_lines_before =", "from_modules: if module in remove_imports: continue import_start = f\"from {module}", "] elif extension != \"pyi\" and next_construct.startswith(STATEMENT_DECLARATIONS): formatted_output[imports_tail:0] = [\"\",", "key: sorting.module_key( key, config, section_name=section, straight_import=True ), reverse=config.reverse_sort, ) from_modules", "(index + 1) or formatted_output[index + 1].strip() != \"\" ):", "type: ignore ): do_multiline_reformat = True if do_multiline_reformat: import_statement =", "and output[0].strip() == \"\": output.pop(0) if config.formatting_function: output = config.formatting_function(", ") # uncollapse comments section_output = [] for line in", "= [\"\"] if parsed.place_imports: new_out_lines = [] for index, line", "_with_star_comments(parsed: parse.ParsedContent, module: str, comments: List[str]) -> List[str]: star_comment =", "# pragma: no branch should_skip, in_quote, *_ = parse.skip_line( line,", "\"import\", ) -> str: \"\"\"Adds the imports back to the", "if section == \"FUTURE\": base_sections = (\"FUTURE\",) continue parsed.imports[\"no_sections\"][\"straight\"].update( parsed.imports[section].get(\"straight\",", "= config.import_headings.get(section_name.lower(), \"\") if section_title and section_title not in seen_headings:", "return output def _output_as_string(lines: List[str], line_separator: str) -> str: return", "in parsed.as_map[\"straight\"][module] ) else: import_definition.append((f\"{import_type} {module}\", module)) comments_above = parsed.categorized_comments[\"above\"][\"straight\"].pop(module,", "= other_import_statement if not do_multiline_reformat and len(import_statement) > config.line_length: import_statement", "in as_imports ] only_show_as_imports = True elif config.force_single_line and module", "from_import_section.append(from_imports.pop(0)) if config.combine_as_imports: comments = (comments or []) + list(", "parsed.as_map[\"straight\"] for module in straight_modules)) # combine_straight_imports only works for", "remove_imports: continue import_definition = [] if module in parsed.as_map[\"straight\"]: if", ") else: import_definition.append((f\"{import_type} {module}\", module)) comments_above = parsed.categorized_comments[\"above\"][\"straight\"].pop(module, None) if", "= [] as_imports = any((module in parsed.as_map[\"straight\"] for module in", "comments_above.append(line) elif comments_above: new_section_output.append(_LineWithComments(line, comments_above)) comments_above = [] else: new_section_output.append(line)", "from_imports + lines_between + straight_imports else: section_output = straight_imports +", "and from_imports[0] in as_imports: from_import = from_imports.pop(0) if not config.only_sections:", "force_grid_wrap: do_multiline_reformat = True if len(import_statement) > config.line_length and len(from_import_section)", "should_skip, in_quote, *_ = parse.skip_line( line, in_quote=\"\", index=len(formatted_output), section_comments=config.section_comments, needs_import=False,", "module in parsed.categorized_comments[\"straight\"]: inline_comments.extend(parsed.categorized_comments[\"straight\"][module]) combined_straight_imports = \", \".join(straight_modules) if inline_comments:", "enumerate(tail): # pragma: no branch should_skip, in_quote, *_ = parse.skip_line(", "= [] else: new_section_output.append(line) # only_sections options is not imposed", "= sorting.sort( config, 
new_section_output, key=partial(sorting.section_key, config=config), reverse=config.reverse_sort, ) # uncollapse", "elif config.force_single_line and module not in config.single_line_exclusions: import_statement = \"\"", ") for idef, imodule in import_definition ) return output def", "= sorting.sort( config, from_imports, key=lambda key: sorting.module_key( key, config, True,", "from_comments = parsed.categorized_comments[\"straight\"].get( f\"{module}.{from_import}\" ) if not config.only_sections: output.extend( with_comments(", "= straight_imports + lines_between + from_imports if config.force_sort_within_sections: # collapse", "as {as_module}\" for as_module in parsed.as_map[\"from\"][sub_module] ] for from_import, sub_module", "else: other_modules.append(module) from_modules = star_modules + other_modules straight_imports = _with_straight_imports(", "\"\" output.extend(above_comments) if combined_inline_comments: output.append( f\"{import_type} {combined_straight_imports} # {combined_inline_comments}\" )", "from_import_section: import_statement = \"\" do_multiline_reformat = False force_grid_wrap = config.force_grid_wrap", "wrap.line(single_import_line, parsed.line_separator, config) ) from_comments = parsed.categorized_comments[\"straight\"].get( f\"{module}.{from_import}\" ) if", "not imposed if force_sort_within_sections is True new_section_output = sorting.sort( config,", "_output_as_string(formatted_output, parsed.line_separator) def _with_from_imports( parsed: parse.ParsedContent, config: Config, from_modules: Iterable[str],", "0 if parsed.import_index < parsed.original_line_count: output_at = parsed.import_index formatted_output[output_at:0] =", ") else: output.append(wrap.line(single_import_line, parsed.line_separator, config)) comments = None else: while", "module not in config.single_line_exclusions) ) and not config.only_sections: from_imports =", "from_modules: if \"*\" in parsed.imports[section][\"from\"][module]: star_modules.append(module) else: other_modules.append(module) from_modules =", "+ (\"no_sections\",) output: List[str] = [] seen_headings: Set[str] = set()", "comments return instance def _ensure_newline_before_comment(output: List[str]) -> List[str]: new_output: List[str]", "{}) ) parsed.imports[\"no_sections\"][\"from\"].update(parsed.imports[section].get(\"from\", {})) sections = base_sections + (\"no_sections\",) output:", "[] for module in from_modules: if module in remove_imports: continue", "parsed, config, from_modules, section, remove_imports, import_type ) lines_between = [\"\"]", "while lines and lines[-1].strip() == \"\": lines.pop(-1) lines.append(\"\") return lines", "config=config), reverse=config.reverse_sort, ) # uncollapse comments section_output = [] for", "# If line too long AND have imports AND we", "from_imports=from_import_section, comments=comments, line_separator=parsed.line_separator, config=config, ) if config.multi_line_output == wrap.Modes.GRID: #", "if config.ensure_newline_before_comments: output = _ensure_newline_before_comment(output) while output and output[-1].strip() ==", "break if in_quote: # pragma: no branch next_construct = line", "parsed, config, straight_modules, section, remove_imports, import_type ) from_imports = _with_from_imports(", "import_statement = wrap.line( with_comments( _with_star_comments(parsed, module, list(comments or ())), f\"{import_start}*\",", "parsed.categorized_comments[\"from\"].pop(module, ()) above_comments = parsed.categorized_comments[\"above\"][\"from\"].pop(module, None) while from_imports: 
if above_comments:", "comment_prefix=config.comment_prefix, ) for as_import in sorting.sort(config, as_imports[from_import]) ) else: output.extend(", "( max( len(import_line) for import_line in import_statement.split(parsed.line_separator) ) > config.line_length", "= wrap.import_statement( import_start=import_start, from_imports=from_import_section, comments=comments, line_separator=parsed.line_separator, config=config, multi_line_output=wrap.Modes.VERTICAL_GRID, # type:", "len(from_import_section) > 1: do_multiline_reformat = True # If line too", "in as_imports[from_import] ) else: output.append(wrap.line(single_import_line, parsed.line_separator, config)) comments = None", "from . import parse, sorting, wrap from .comments import add_to_line", "= with_comments( comments, import_start + from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) comment", ") import_statement = with_comments( comments, import_start + (\", \").join(from_import_section), removed=config.ignore_comments,", "and len(from_import_section) > 1: do_multiline_reformat = True # If line", "= parsed.categorized_comments[\"above\"][\"straight\"].pop(module, None) if comments_above: output.extend(comments_above) output.extend( with_comments( parsed.categorized_comments[\"straight\"].get(imodule), idef,", "getattr(line, \"comments\", ()) if comments: section_output.extend(comments) section_output.append(str(line)) section_name = section", "\".join(inline_comments) else: combined_inline_comments = \"\" output.extend(above_comments) if combined_inline_comments: output.append( f\"{import_type}", "= with_comments( comments, import_start + (\", \").join(from_import_section), removed=config.ignore_comments, comment_prefix=config.comment_prefix, )", "do_multiline_reformat = True # If line too long AND have", "if len(formatted_output) > imports_tail: next_construct = \"\" tail = formatted_output[imports_tail:]", "new_out_lines.extend(parsed.place_imports[parsed.import_placements[line]]) if ( len(formatted_output) <= (index + 1) or formatted_output[index", "None if \"*\" in from_imports and config.combine_star: import_statement = wrap.line(", "only_show_as_imports = True elif config.force_single_line and module not in config.single_line_exclusions:", "from_modules = parsed.imports[section][\"from\"] if not config.only_sections: from_modules = sorting.sort( config,", "line else False for line, prev_line in zip(output, [None] +", "config.combine_as_imports and parsed.imports[section][\"from\"][module][from_import] ) ): from_import_section.append(from_imports.pop(0)) if config.combine_as_imports: comments =", "config)) from_import_section = [] while from_imports and ( from_imports[0] not", "from_import in as_imports: idx = from_imports.index(from_import) if parsed.imports[section][\"from\"][module][from_import]: from_imports[(idx +", "), parsed.line_separator, config, ) ) from_comments = [] for as_import", "config.combine_as_imports: comments = (comments or []) + list( parsed.categorized_comments[\"from\"].pop(f\"{module}.__combined_as__\", ())", "{}).pop(\"*\", None) if star_comment: return comments + [star_comment] return comments", ") if config.star_first: star_modules = [] other_modules = [] for", "key: sorting.module_key( key, config, True, config.force_alphabetical_sort_within_sections, section_name=section, ), reverse=config.reverse_sort, )", "parsed.line_separator) def _with_from_imports( parsed: parse.ParsedContent, config: Config, from_modules: Iterable[str], section:", "= 
itertools.chain(parsed.sections, config.forced_separate) if config.no_sections: parsed.imports[\"no_sections\"] = {\"straight\": {}, \"from\":", "config.no_inline_sort or (config.force_single_line and module not in config.single_line_exclusions) ) and", ".pop(from_import, None) ) if specific_comment: from_comments.append(specific_comment) output.append( wrap.line( with_comments( from_comments,", "from_modules = star_modules + other_modules straight_imports = _with_straight_imports( parsed, config,", "comments_above = parsed.categorized_comments[\"above\"][\"straight\"].pop(module, None) if comments_above: output.extend(comments_above) output.extend( with_comments( parsed.categorized_comments[\"straight\"].get(imodule),", "straight_modules)) # combine_straight_imports only works for bare imports, 'as' imports", "if ( parsed.imports[section][\"from\"][module][from_import] and not only_show_as_imports ): output.append( wrap.line(single_import_line, parsed.line_separator,", "= DEFAULT_CONFIG, extension: str = \"py\", import_type: str = \"import\",", "in_quote, *_ = parse.skip_line( line, in_quote=\"\", index=len(formatted_output), section_comments=config.section_comments, needs_import=False, )", "for module in straight_modules)) # combine_straight_imports only works for bare", "_ensure_newline_before_comment(output: List[str]) -> List[str]: new_output: List[str] = [] def is_comment(line:", "if not config.only_sections: as_imports[as_import] = sorting.sort(config, as_imports[as_import]) for from_import in", "1] ] == [\"\"]: formatted_output.pop(imports_tail) if len(formatted_output) > imports_tail: next_construct", "for index, line in enumerate(tail): # pragma: no branch should_skip,", "len(import_line) for import_line in import_statement.split(parsed.line_separator) ) > config.line_length ): import_statement", "= from_imports + lines_between + straight_imports else: section_output = straight_imports", "from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) single_import_line += ( f\"{use_comments and ';'", "import_start = f\"from {module} {import_type} \" from_imports = list(parsed.imports[section][\"from\"][module]) if", "): do_multiline_reformat = True if do_multiline_reformat: import_statement = wrap.import_statement( import_start=import_start,", "do_multiline_reformat = True if do_multiline_reformat: import_statement = wrap.import_statement( import_start=import_start, from_imports=from_import_section,", "parsed.categorized_comments[\"above\"][\"straight\"].pop(module, None) if comments_above: output.extend(comments_above) output.extend( with_comments( parsed.categorized_comments[\"straight\"].get(imodule), idef, removed=config.ignore_comments,", "as_imports[as_import]) for from_import in copy.copy(from_imports): if from_import in as_imports: idx", "output.append( with_comments( _with_star_comments(parsed, module, []), f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) )", "List[str]: output: List[str] = [] as_imports = any((module in parsed.as_map[\"straight\"]", "List[str]) -> List[str]: new_output: List[str] = [] def is_comment(line: Optional[str])", "next_construct = line break if config.lines_after_imports != -1: formatted_output[imports_tail:0] =", "== -1: return _output_as_string(parsed.lines_without_imports, parsed.line_separator) formatted_output: List[str] = parsed.lines_without_imports.copy() remove_imports", "in import_statement.split(parsed.line_separator) ) > config.line_length ): import_statement = 
other_import_statement if", "line in enumerate(tail): # pragma: no branch should_skip, in_quote, *_", "as_imports.pop(from_import) else: from_imports[idx : (idx + 1)] = as_imports.pop(from_import) only_show_as_imports", "from_imports, key=lambda key: sorting.module_key( key, config, True, config.force_alphabetical_sort_within_sections, section_name=section, ),", "removal in config.remove_imports] sections: Iterable[str] = itertools.chain(parsed.sections, config.forced_separate) if config.no_sections:", "not only_show_as_imports ): specific_comment = ( parsed.categorized_comments[\"nested\"] .get(module, {}) .pop(from_import,", "return line_separator.join(_normalize_empty_lines(lines)) def _normalize_empty_lines(lines: List[str]) -> List[str]: while lines and", "value: Any, comments: List[str] ) -> \"_LineWithComments\": instance = super().__new__(cls,", "while output and output[0].strip() == \"\": output.pop(0) if config.formatting_function: output", "as_imports: from_import = from_imports.pop(0) if not config.only_sections: as_imports[from_import] = sorting.sort(config,", "[] if module in parsed.as_map[\"straight\"]: if parsed.imports[section][\"straight\"][module]: import_definition.append((f\"{import_type} {module}\", module))", "in zip(from_imports, sub_modules) if sub_module in parsed.as_map[\"from\"] } if config.combine_as_imports", "from_comments.append(specific_comment) output.append( wrap.line( with_comments( from_comments, import_start + as_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix,", "...] = () for section in sections: if section ==", "= line break if in_quote: # pragma: no branch next_construct", "= parsed.categorized_comments[\"above\"][\"from\"].pop(module, None) while from_imports: if above_comments: output.extend(above_comments) above_comments =", "if ( len(import_statement) > config.line_length and len(from_import_section) > 0 and", "import_type ) lines_between = [\"\"] * ( config.lines_between_types if from_modules", "sorted_imports( parsed: parse.ParsedContent, config: Config = DEFAULT_CONFIG, extension: str =", "= config.formatting_function( parsed.line_separator.join(output), extension, config ).splitlines() output_at = 0 if", "sorting.sort( config, new_section_output, key=partial(sorting.section_key, config=config), reverse=config.reverse_sort, ) # uncollapse comments", "only_show_as_imports ): output.append( wrap.line(single_import_line, parsed.line_separator, config) ) from_comments = parsed.categorized_comments[\"straight\"].get(", "too long AND have imports AND we are # NOT", "between groups \"\"\" if parsed.import_index == -1: return _output_as_string(parsed.lines_without_imports, parsed.line_separator)", "first import) sorted alphabetically and split between groups \"\"\" if", "( line.strip().startswith(\"#\") and len(tail) > (index + 1) and tail[index", "and config.combine_star: import_statement = wrap.line( with_comments( _with_star_comments(parsed, module, list(comments or", "if not do_multiline_reformat and len(import_statement) > config.line_length: import_statement = wrap.line(import_statement,", "import_start + from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) comment = ( parsed.categorized_comments[\"nested\"].get(module,", "above_comments = parsed.categorized_comments[\"above\"][\"from\"].pop(module, None) while from_imports: if above_comments: output.extend(above_comments) above_comments", "import_statement = wrap.line(import_statement, parsed.line_separator, config) if 
import_statement: output.append(import_statement) return output", "in seen_headings: if config.dedup_headings: seen_headings.add(section_title) section_comment = f\"# {section_title}\" if", "comment_prefix=config.comment_prefix, ) if not from_import_section: import_statement = \"\" do_multiline_reformat =", "for from_import, sub_module in zip(from_imports, sub_modules) if sub_module in parsed.as_map[\"from\"]", "removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) ) from_imports.remove(\"*\") for from_import in copy.copy(from_imports): comment", "do_multiline_reformat and len(import_statement) > config.line_length: import_statement = wrap.line(import_statement, parsed.line_separator, config)", "class _LineWithComments(str): comments: List[str] def __new__( cls: Type[\"_LineWithComments\"], value: Any,", "idx = from_imports.index(from_import) if parsed.imports[section][\"from\"][module][from_import]: from_imports[(idx + 1) : (idx", "parsed.imports[section][\"from\"] if not config.only_sections: from_modules = sorting.sort( config, from_modules, key=lambda", "removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for as_import in as_imports[from_import] ) else: output.append(wrap.line(single_import_line,", "output.append(wrap.line(single_import_line, parsed.line_separator, config)) comments = None else: while from_imports and", "next_construct = \"\" tail = formatted_output[imports_tail:] for index, line in", "= [\"\", \"\"] else: formatted_output[imports_tail:0] = [\"\"] if parsed.place_imports: new_out_lines", "other_modules.append(module) from_modules = star_modules + other_modules straight_imports = _with_straight_imports( parsed,", "{as_import}\") for as_import in parsed.as_map[\"straight\"][module] ) else: import_definition.append((f\"{import_type} {module}\", module))", "f\"{module}.{from_import}\" ) if not config.only_sections: output.extend( with_comments( from_comments, wrap.line( import_start", "(f\"{import_type} {module} as {as_import}\", f\"{module} as {as_import}\") for as_import in", "sections: if section == \"FUTURE\": base_sections = (\"FUTURE\",) continue parsed.imports[\"no_sections\"][\"straight\"].update(", "List[str]: while lines and lines[-1].strip() == \"\": lines.pop(-1) lines.append(\"\") return", "pragma: no branch section_output.insert(0, section_comment) if pending_lines_before or not no_lines_before:", "config.single_line_exclusions: import_statement = \"\" while from_imports: from_import = from_imports.pop(0) single_import_line", "parsed.import_index == -1: return _output_as_string(parsed.lines_without_imports, parsed.line_separator) formatted_output: List[str] = parsed.lines_without_imports.copy()", "in_quote: # pragma: no branch next_construct = line break if", "lines_between + straight_imports else: section_output = straight_imports + lines_between +", "import_start + from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) single_import_line += ( f\"{use_comments", "), parsed.line_separator, config, ) ) from_comments = [] if \"*\"", "line.startswith(\"#\") if line else False for line, prev_line in zip(output,", "and tail[index + 1].strip() ): continue next_construct = line break", "not included if config.combine_straight_imports and not as_imports: if not straight_modules:", "comments section_output = [] for line in new_section_output: comments =", "only_show_as_imports = False comments = parsed.categorized_comments[\"from\"].pop(module, ()) above_comments = 
parsed.categorized_comments[\"above\"][\"from\"].pop(module,", "# NOT using GRID or VERTICAL wrap modes if (", "True if len(import_statement) > config.line_length and len(from_import_section) > 1: do_multiline_reformat", "config, new_section_output, key=partial(sorting.section_key, config=config), reverse=config.reverse_sort, ) # uncollapse comments section_output", "return _output_as_string(parsed.lines_without_imports, parsed.line_separator) formatted_output: List[str] = parsed.lines_without_imports.copy() remove_imports = [format_simplified(removal)", "str = \"py\", import_type: str = \"import\", ) -> str:", "parsed.as_map[\"from\"] } if config.combine_as_imports and not (\"*\" in from_imports and", "] sub_modules = [f\"{module}.{from_import}\" for from_import in from_imports] as_imports =", "0 ) if config.from_first: section_output = from_imports + lines_between +", "isort.format import format_simplified from . import parse, sorting, wrap from", "is True new_section_output = sorting.sort( config, new_section_output, key=partial(sorting.section_key, config=config), reverse=config.reverse_sort,", "section_title = config.import_headings.get(section_name.lower(), \"\") if section_title and section_title not in", ": (idx + 1)] = as_imports.pop(from_import) only_show_as_imports = False comments", "config: Config, straight_modules: Iterable[str], section: str, remove_imports: List[str], import_type: str,", "from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ), parsed.line_separator, config, ) ) from_comments =", "parsed.imports[section][\"from\"][module][from_import]: from_imports[(idx + 1) : (idx + 1)] = as_imports.pop(from_import)", "or config.comment_prefix} \" f\"{comment}\" ) output.append(wrap.line(single_import_line, parsed.line_separator, config)) from_import_section =", "len(formatted_output) <= (index + 1) or formatted_output[index + 1].strip() !=", "= \" \".join(inline_comments) else: combined_inline_comments = \"\" output.extend(above_comments) if combined_inline_comments:", "parsed.imports[section][\"from\"][module][from_import] and not only_show_as_imports ): specific_comment = ( parsed.categorized_comments[\"nested\"] .get(module,", "+ len(output) while [ character.strip() for character in formatted_output[imports_tail :", "as_imports[from_import]: specific_comment = ( parsed.categorized_comments[\"nested\"] .get(module, {}) .pop(as_import, None) )", "+ from_imports if config.force_sort_within_sections: # collapse comments comments_above = []", "return line.startswith(\"#\") if line else False for line, prev_line in", "in zip(output, [None] + output): # type: ignore if is_comment(line)", "{as_import}\", f\"{module} as {as_import}\") for as_import in parsed.as_map[\"straight\"][module] ) else:", "# pragma: no branch next_construct = line break if config.lines_after_imports", "and split between groups \"\"\" if parsed.import_index == -1: return", "section_name=section, straight_import=True ), reverse=config.reverse_sort, ) from_modules = parsed.imports[section][\"from\"] if not", "the file. 
(at the index of the first import) sorted", "if config.lines_after_imports != -1: formatted_output[imports_tail:0] = [ \"\" for line", "Iterable[str] = itertools.chain(parsed.sections, config.forced_separate) if config.no_sections: parsed.imports[\"no_sections\"] = {\"straight\": {},", "output: List[str] = [] seen_headings: Set[str] = set() pending_lines_before =", "sorting.sort(config, as_imports[from_import]) from_comments = ( parsed.categorized_comments[\"straight\"].get(f\"{module}.{from_import}\") or [] ) if", "output = config.formatting_function( parsed.line_separator.join(output), extension, config ).splitlines() output_at = 0", "itertools from functools import partial from typing import Any, Iterable,", "comments = getattr(line, \"comments\", ()) if comments: section_output.extend(comments) section_output.append(str(line)) section_name", "\" from_imports = list(parsed.imports[section][\"from\"][module]) if ( not config.no_inline_sort or (config.force_single_line", "sections: straight_modules = parsed.imports[section][\"straight\"] if not config.only_sections: straight_modules = sorting.sort(", "seen_headings.add(section_title) section_comment = f\"# {section_title}\" if section_comment not in parsed.lines_without_imports[0:1]:", "= parsed.import_index formatted_output[output_at:0] = output if output: imports_tail = output_at", "set() pending_lines_before = False for section in sections: straight_modules =", "comment_prefix=config.comment_prefix, ) ) from_imports.remove(\"*\") for from_import in copy.copy(from_imports): comment =", "1)] = as_imports.pop(from_import) only_show_as_imports = False comments = parsed.categorized_comments[\"from\"].pop(module, ())", "\"\" while from_imports: from_import = from_imports.pop(0) single_import_line = with_comments( comments,", "section_comment = f\"# {section_title}\" if section_comment not in parsed.lines_without_imports[0:1]: #", "if specific_comment: from_comments.append(specific_comment) output.append( wrap.line( with_comments( from_comments, import_start + as_import,", "output_at + len(output) while [ character.strip() for character in formatted_output[imports_tail", "character.strip() for character in formatted_output[imports_tail : imports_tail + 1] ]", "is not imposed if force_sort_within_sections is True new_section_output = sorting.sort(", "List[str] = [] seen_headings: Set[str] = set() pending_lines_before = False", "+ 1].strip() != \"\" ): new_out_lines.append(\"\") formatted_output = new_out_lines return", "in sorting.sort(config, as_imports[from_import]) ) else: output.extend( with_comments( from_comments, wrap.line( import_start", "next_construct.startswith(STATEMENT_DECLARATIONS): formatted_output[imports_tail:0] = [\"\", \"\"] else: formatted_output[imports_tail:0] = [\"\"] if", "return [] above_comments: List[str] = [] inline_comments: List[str] = []", "import copy import itertools from functools import partial from typing", "parsed.imports[section][\"from\"][module]: star_modules.append(module) else: other_modules.append(module) from_modules = star_modules + other_modules straight_imports", "= ( parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import, None) ) if comment: single_import_line +=", "None else: while from_imports and from_imports[0] in as_imports: from_import =", "= ( parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import, None) ) if comment: from_imports.remove(from_import) if", "new_section_output.append(line) # only_sections options is not imposed if force_sort_within_sections 
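

# The private helpers below all operate on a parse.ParsedContent produced by
# isort's parser, so user code normally reaches sorted_imports() through the
# public API rather than calling it directly.  A minimal sketch of that path;
# the helper name below is ours and purely illustrative, and it assumes an
# isort >= 5 installation:
def _example_public_api_usage() -> None:  # pragma: no cover - illustrative sketch only
    import isort

    messy = "import sys\nimport os\nfrom collections import OrderedDict, abc\n"
    # isort.code() parses the source and re-emits the import block via
    # sorted_imports() above, sorted and grouped by section.
    print(isort.code(messy))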


def _with_from_imports(
    parsed: parse.ParsedContent,
    config: Config,
    from_modules: Iterable[str],
    section: str,
    remove_imports: List[str],
    import_type: str,
) -> List[str]:
    output: List[str] = []

    for module in from_modules:
        if module in remove_imports:
            continue

        import_start = f"from {module} {import_type} "
        from_imports = list(parsed.imports[section]["from"][module])
        if (
            not config.no_inline_sort
            or (config.force_single_line and module not in config.single_line_exclusions)
        ) and not config.only_sections:
            from_imports = sorting.sort(
                config,
                from_imports,
                key=lambda key: sorting.module_key(
                    key,
                    config,
                    True,
                    config.force_alphabetical_sort_within_sections,
                    section_name=section,
                ),
                reverse=config.reverse_sort,
            )
        if remove_imports:
            from_imports = [
                line for line in from_imports if f"{module}.{line}" not in remove_imports
            ]

        sub_modules = [f"{module}.{from_import}" for from_import in from_imports]
        as_imports = {
            from_import: [
                f"{from_import} as {as_module}" for as_module in parsed.as_map["from"][sub_module]
            ]
            for from_import, sub_module in zip(from_imports, sub_modules)
            if sub_module in parsed.as_map["from"]
        }
        if config.combine_as_imports and not ("*" in from_imports and config.combine_star):
            if not config.no_inline_sort:
                for as_import in as_imports:
                    if not config.only_sections:
                        as_imports[as_import] = sorting.sort(config, as_imports[as_import])
            for from_import in copy.copy(from_imports):
                if from_import in as_imports:
                    idx = from_imports.index(from_import)
                    if parsed.imports[section]["from"][module][from_import]:
                        from_imports[(idx + 1) : (idx + 1)] = as_imports.pop(from_import)
                    else:
                        from_imports[idx : (idx + 1)] = as_imports.pop(from_import)

        only_show_as_imports = False
        comments = parsed.categorized_comments["from"].pop(module, ())
        above_comments = parsed.categorized_comments["above"]["from"].pop(module, None)
        while from_imports:
            if above_comments:
                output.extend(above_comments)
                above_comments = None

            if "*" in from_imports and config.combine_star:
                import_statement = wrap.line(
                    with_comments(
                        _with_star_comments(parsed, module, list(comments or ())),
                        f"{import_start}*",
                        removed=config.ignore_comments,
                        comment_prefix=config.comment_prefix,
                    ),
                    parsed.line_separator,
                    config,
                )
                from_imports = [
                    from_import for from_import in from_imports if from_import in as_imports
                ]
                only_show_as_imports = True
            elif config.force_single_line and module not in config.single_line_exclusions:
                import_statement = ""
                while from_imports:
                    from_import = from_imports.pop(0)
                    single_import_line = with_comments(
                        comments,
                        import_start + from_import,
                        removed=config.ignore_comments,
                        comment_prefix=config.comment_prefix,
                    )
                    comment = (
                        parsed.categorized_comments["nested"].get(module, {}).pop(from_import, None)
                    )
                    if comment:
                        single_import_line += (
                            f"{comments and ';' or config.comment_prefix} " f"{comment}"
                        )
                    if from_import in as_imports:
                        if (
                            parsed.imports[section]["from"][module][from_import]
                            and not only_show_as_imports
                        ):
                            output.append(
                                wrap.line(single_import_line, parsed.line_separator, config)
                            )
                        from_comments = parsed.categorized_comments["straight"].get(
                            f"{module}.{from_import}"
                        )
                        if not config.only_sections:
                            output.extend(
                                with_comments(
                                    from_comments,
                                    wrap.line(
                                        import_start + as_import, parsed.line_separator, config
                                    ),
                                    removed=config.ignore_comments,
                                    comment_prefix=config.comment_prefix,
                                )
                                for as_import in sorting.sort(config, as_imports[from_import])
                            )
                        else:
                            output.extend(
                                with_comments(
                                    from_comments,
                                    wrap.line(
                                        import_start + as_import, parsed.line_separator, config
                                    ),
                                    removed=config.ignore_comments,
                                    comment_prefix=config.comment_prefix,
                                )
                                for as_import in as_imports[from_import]
                            )
                    else:
                        output.append(wrap.line(single_import_line, parsed.line_separator, config))
                    comments = None
            else:
                while from_imports and from_imports[0] in as_imports:
                    from_import = from_imports.pop(0)
                    if not config.only_sections:
                        as_imports[from_import] = sorting.sort(config, as_imports[from_import])
                    from_comments = (
                        parsed.categorized_comments["straight"].get(f"{module}.{from_import}") or []
                    )
                    if (
                        parsed.imports[section]["from"][module][from_import]
                        and not only_show_as_imports
                    ):
                        specific_comment = (
                            parsed.categorized_comments["nested"]
                            .get(module, {})
                            .pop(from_import, None)
                        )
                        if specific_comment:
                            from_comments.append(specific_comment)
                        output.append(
                            wrap.line(
                                with_comments(
                                    from_comments,
                                    import_start + from_import,
                                    removed=config.ignore_comments,
                                    comment_prefix=config.comment_prefix,
                                ),
                                parsed.line_separator,
                                config,
                            )
                        )
                        from_comments = []

                    for as_import in as_imports[from_import]:
                        specific_comment = (
                            parsed.categorized_comments["nested"]
                            .get(module, {})
                            .pop(as_import, None)
                        )
                        if specific_comment:
                            from_comments.append(specific_comment)
                        output.append(
                            wrap.line(
                                with_comments(
                                    from_comments,
                                    import_start + as_import,
                                    removed=config.ignore_comments,
                                    comment_prefix=config.comment_prefix,
                                ),
                                parsed.line_separator,
                                config,
                            )
                        )
                        from_comments = []

                if "*" in from_imports:
                    output.append(
                        with_comments(
                            _with_star_comments(parsed, module, []),
                            f"{import_start}*",
                            removed=config.ignore_comments,
                            comment_prefix=config.comment_prefix,
                        )
                    )
                    from_imports.remove("*")

                for from_import in copy.copy(from_imports):
                    comment = (
                        parsed.categorized_comments["nested"].get(module, {}).pop(from_import, None)
                    )
                    if comment:
                        from_imports.remove(from_import)
                        if from_imports:
                            use_comments = []
                        else:
                            use_comments = comments
                            comments = None
                        single_import_line = with_comments(
                            use_comments,
                            import_start + from_import,
                            removed=config.ignore_comments,
                            comment_prefix=config.comment_prefix,
                        )
                        single_import_line += (
                            f"{use_comments and ';' or config.comment_prefix} " f"{comment}"
                        )
                        output.append(wrap.line(single_import_line, parsed.line_separator, config))

                from_import_section = []
                while from_imports and (
                    from_imports[0] not in as_imports
                    or (
                        config.combine_as_imports
                        and parsed.imports[section]["from"][module][from_import]
                    )
                ):
                    from_import_section.append(from_imports.pop(0))

                if config.combine_as_imports:
                    comments = (comments or []) + list(
                        parsed.categorized_comments["from"].pop(f"{module}.__combined_as__", ())
                    )

                import_statement = with_comments(
                    comments,
                    import_start + (", ").join(from_import_section),
                    removed=config.ignore_comments,
                    comment_prefix=config.comment_prefix,
                )
                if not from_import_section:
                    import_statement = ""

                do_multiline_reformat = False

                force_grid_wrap = config.force_grid_wrap
                if force_grid_wrap and len(from_import_section) >= force_grid_wrap:
                    do_multiline_reformat = True

                if len(import_statement) > config.line_length and len(from_import_section) > 1:
                    do_multiline_reformat = True

                # If line too long AND have imports AND we are
                # NOT using GRID or VERTICAL wrap modes
                if (
                    len(import_statement) > config.line_length
                    and len(from_import_section) > 0
                    and config.multi_line_output
                    not in (wrap.Modes.GRID, wrap.Modes.VERTICAL)  # type: ignore
                ):
                    do_multiline_reformat = True

                if do_multiline_reformat:
                    import_statement = wrap.import_statement(
                        import_start=import_start,
                        from_imports=from_import_section,
                        comments=comments,
                        line_separator=parsed.line_separator,
                        config=config,
                    )
                    if config.multi_line_output == wrap.Modes.GRID:  # type: ignore
                        other_import_statement = wrap.import_statement(
                            import_start=import_start,
                            from_imports=from_import_section,
                            comments=comments,
                            line_separator=parsed.line_separator,
                            config=config,
                            multi_line_output=wrap.Modes.VERTICAL_GRID,  # type: ignore
                        )
                        if (
                            max(
                                len(import_line)
                                for import_line in import_statement.split(parsed.line_separator)
                            )
                            > config.line_length
                        ):
                            import_statement = other_import_statement
                if not do_multiline_reformat and len(import_statement) > config.line_length:
                    import_statement = wrap.line(import_statement, parsed.line_separator, config)

            if import_statement:
                output.append(import_statement)

    return output


def _with_straight_imports(
    parsed: parse.ParsedContent,
    config: Config,
    straight_modules: Iterable[str],
    section: str,
    remove_imports: List[str],
    import_type: str,
) -> List[str]:
    output: List[str] = []

    as_imports = any((module in parsed.as_map["straight"] for module in straight_modules))

    # combine_straight_imports only works for bare imports, 'as' imports not included
    if config.combine_straight_imports and not as_imports:
        if not straight_modules:
            return []

        above_comments: List[str] = []
        inline_comments: List[str] = []

        for module in straight_modules:
            if module in parsed.categorized_comments["above"]["straight"]:
                above_comments.extend(parsed.categorized_comments["above"]["straight"].pop(module))
            if module in parsed.categorized_comments["straight"]:
                inline_comments.extend(parsed.categorized_comments["straight"][module])

        combined_straight_imports = ", ".join(straight_modules)
        if inline_comments:
            combined_inline_comments = " ".join(inline_comments)
        else:
            combined_inline_comments = ""

        output.extend(above_comments)

        if combined_inline_comments:
            output.append(
                f"{import_type} {combined_straight_imports}  # {combined_inline_comments}"
            )
        else:
            output.append(f"{import_type} {combined_straight_imports}")

        return output

    for module in straight_modules:
        if module in remove_imports:
            continue

        import_definition = []
        if module in parsed.as_map["straight"]:
            if parsed.imports[section]["straight"][module]:
                import_definition.append((f"{import_type} {module}", module))
            import_definition.extend(
                (f"{import_type} {module} as {as_import}", f"{module} as {as_import}")
                for as_import in parsed.as_map["straight"][module]
            )
        else:
            import_definition.append((f"{import_type} {module}", module))

        comments_above = parsed.categorized_comments["above"]["straight"].pop(module, None)
        if comments_above:
            output.extend(comments_above)
        output.extend(
            with_comments(
                parsed.categorized_comments["straight"].get(imodule),
                idef,
                removed=config.ignore_comments,
                comment_prefix=config.comment_prefix,
            )
            for idef, imodule in import_definition
        )

    return output


def _output_as_string(lines: List[str], line_separator: str) -> str:
    return line_separator.join(_normalize_empty_lines(lines))


def _normalize_empty_lines(lines: List[str]) -> List[str]:
    while lines and lines[-1].strip() == "":
        lines.pop(-1)

    lines.append("")
    return lines


class _LineWithComments(str):
    comments: List[str]

    def __new__(
        cls: Type["_LineWithComments"], value: Any, comments: List[str]
    ) -> "_LineWithComments":
        instance = super().__new__(cls, value)
        instance.comments = comments
        return instance
from_imports:", "if config.force_sort_within_sections: # collapse comments comments_above = [] new_section_output: List[str]", "config, section_name=section), reverse=config.reverse_sort, ) if config.star_first: star_modules = [] other_modules", "config.no_sections: parsed.imports[\"no_sections\"] = {\"straight\": {}, \"from\": {}} base_sections: Tuple[str, ...]", "pragma: no branch should_skip, in_quote, *_ = parse.skip_line( line, in_quote=\"\",", "section in sections: if section == \"FUTURE\": base_sections = (\"FUTURE\",)", "for line in range(config.lines_after_imports) ] elif extension != \"pyi\" and", "parsed.imports[section][\"from\"][module][from_import] ) ): from_import_section.append(from_imports.pop(0)) if config.combine_as_imports: comments = (comments or", "= \"import\", ) -> str: \"\"\"Adds the imports back to", "parsed.line_separator, config, ) ) from_comments = [] if \"*\" in", "[] above_comments: List[str] = [] inline_comments: List[str] = [] for", "= [] for module in from_modules: if module in remove_imports:", "output.append(import_statement) return output def _with_straight_imports( parsed: parse.ParsedContent, config: Config, straight_modules:", "): import_statement = other_import_statement if not do_multiline_reformat and len(import_statement) >", "# pragma: no cover while output and output[0].strip() == \"\":", "for as_module in parsed.as_map[\"from\"][sub_module] ] for from_import, sub_module in zip(from_imports,", "config, section_name=section, straight_import=True ), reverse=config.reverse_sort, ) from_modules = parsed.imports[section][\"from\"] if", "config, ) ) from_comments = [] for as_import in as_imports[from_import]:", "from typing import Any, Iterable, List, Optional, Set, Tuple, Type", "line_separator: str) -> str: return line_separator.join(_normalize_empty_lines(lines)) def _normalize_empty_lines(lines: List[str]) ->", "[format_simplified(removal) for removal in config.remove_imports] sections: Iterable[str] = itertools.chain(parsed.sections, config.forced_separate)", "from_imports.remove(from_import) if from_imports: use_comments = [] else: use_comments = comments", "config, ) ) from_comments = [] if \"*\" in from_imports:", "straight_modules: Iterable[str], section: str, remove_imports: List[str], import_type: str, ) ->", "only_sections options is not imposed if force_sort_within_sections is True new_section_output", "_ensure_newline_before_comment(output) while output and output[-1].strip() == \"\": output.pop() # pragma:", "from_imports = [ line for line in from_imports if f\"{module}.{line}\"", "formatted_output: List[str] = parsed.lines_without_imports.copy() remove_imports = [format_simplified(removal) for removal in", "\"from\": {}} base_sections: Tuple[str, ...] 
= () for section in", "config.force_alphabetical_sort_within_sections, section_name=section, ), reverse=config.reverse_sort, ) if remove_imports: from_imports = [", "= as_imports.pop(from_import) only_show_as_imports = False comments = parsed.categorized_comments[\"from\"].pop(module, ()) above_comments", "= [] inline_comments: List[str] = [] for module in straight_modules:", "[] while from_imports and ( from_imports[0] not in as_imports or", "# only_sections options is not imposed if force_sort_within_sections is True", "line_separator=parsed.line_separator, config=config, ) if config.multi_line_output == wrap.Modes.GRID: # type: ignore", "\"comments\", ()) if comments: section_output.extend(comments) section_output.append(str(line)) section_name = section no_lines_before", "with_comments( comments, import_start + (\", \").join(from_import_section), removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) if", "config)) comments = None else: while from_imports and from_imports[0] in", "parsed.line_separator, config, ) ) from_comments = [] for as_import in", "config.only_sections: as_imports[as_import] = sorting.sort(config, as_imports[as_import]) for from_import in copy.copy(from_imports): if", "config.lines_after_imports != -1: formatted_output[imports_tail:0] = [ \"\" for line in", "\"\" tail = formatted_output[imports_tail:] for index, line in enumerate(tail): #", "new_section_output: comments = getattr(line, \"comments\", ()) if comments: section_output.extend(comments) section_output.append(str(line))", "if \"*\" in parsed.imports[section][\"from\"][module]: star_modules.append(module) else: other_modules.append(module) from_modules = star_modules", "for idef, imodule in import_definition ) return output def _output_as_string(lines:", "in range(config.lines_after_imports) ] elif extension != \"pyi\" and next_construct.startswith(STATEMENT_DECLARATIONS): formatted_output[imports_tail:0]", "module, []), f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) ) from_imports.remove(\"*\") for from_import", "not in as_imports or ( config.combine_as_imports and parsed.imports[section][\"from\"][module][from_import] ) ):", "pending_lines_before or not no_lines_before if config.ensure_newline_before_comments: output = _ensure_newline_before_comment(output) while", "is_comment(line) and prev_line != \"\" and not is_comment(prev_line): new_output.append(\"\") new_output.append(line)", ") output.append(wrap.line(single_import_line, parsed.line_separator, config)) from_import_section = [] while from_imports and", "+ 1)] = as_imports.pop(from_import) else: from_imports[idx : (idx + 1)]", "( len(import_statement) > config.line_length and len(from_import_section) > 0 and config.multi_line_output", "parsed.import_index formatted_output[output_at:0] = output if output: imports_tail = output_at +", "config, True, config.force_alphabetical_sort_within_sections, section_name=section, ), reverse=config.reverse_sort, ) if remove_imports: from_imports", "line too long AND have imports AND we are #", "imposed if force_sort_within_sections is True new_section_output = sorting.sort( config, new_section_output,", "removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for as_import in sorting.sort(config, as_imports[from_import]) ) else:", "{ from_import: [ f\"{from_import} as {as_module}\" for as_module in parsed.as_map[\"from\"][sub_module]", "in as_imports or ( config.combine_as_imports and 
parsed.imports[section][\"from\"][module][from_import] ) ): from_import_section.append(from_imports.pop(0))", "line, in_quote=\"\", index=len(formatted_output), section_comments=config.section_comments, needs_import=False, ) if not should_skip and", "if section_comment not in parsed.lines_without_imports[0:1]: # pragma: no branch section_output.insert(0,", "format_simplified from . import parse, sorting, wrap from .comments import", "def is_comment(line: Optional[str]) -> bool: return line.startswith(\"#\") if line else", ") if comment: single_import_line += ( f\"{comments and ';' or", "output for module in straight_modules: if module in remove_imports: continue", "star_modules = [] other_modules = [] for module in from_modules:", "len(formatted_output) > imports_tail: next_construct = \"\" tail = formatted_output[imports_tail:] for", "= (\"FUTURE\",) continue parsed.imports[\"no_sections\"][\"straight\"].update( parsed.imports[section].get(\"straight\", {}) ) parsed.imports[\"no_sections\"][\"from\"].update(parsed.imports[section].get(\"from\", {})) sections", "comments, import_start + (\", \").join(from_import_section), removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) if not", "ignore if is_comment(line) and prev_line != \"\" and not is_comment(prev_line):", "parse.ParsedContent, module: str, comments: List[str]) -> List[str]: star_comment = parsed.categorized_comments[\"nested\"].get(module,", ") from_comments = [] for as_import in as_imports[from_import]: specific_comment =", "wrap.import_statement( import_start=import_start, from_imports=from_import_section, comments=comments, line_separator=parsed.line_separator, config=config, multi_line_output=wrap.Modes.VERTICAL_GRID, # type: ignore", "parsed.categorized_comments[\"nested\"].get(module, {}).pop(\"*\", None) if star_comment: return comments + [star_comment] return", "[] new_section_output: List[str] = [] for line in section_output: if", "while [ character.strip() for character in formatted_output[imports_tail : imports_tail +", "config ).splitlines() output_at = 0 if parsed.import_index < parsed.original_line_count: output_at", "in as_imports[from_import]: specific_comment = ( parsed.categorized_comments[\"nested\"] .get(module, {}) .pop(as_import, None)", "# type: ignore ) if ( max( len(import_line) for import_line", "= [format_simplified(removal) for removal in config.remove_imports] sections: Iterable[str] = itertools.chain(parsed.sections,", "line for line in from_imports if f\"{module}.{line}\" not in remove_imports", "from_imports if config.force_sort_within_sections: # collapse comments comments_above = [] new_section_output:", "= from_imports.pop(0) single_import_line = with_comments( comments, import_start + from_import, removed=config.ignore_comments,", "and not only_show_as_imports ): specific_comment = ( parsed.categorized_comments[\"nested\"] .get(module, {})", "in config.remove_imports] sections: Iterable[str] = itertools.chain(parsed.sections, config.forced_separate) if config.no_sections: parsed.imports[\"no_sections\"]", "len(from_import_section) >= force_grid_wrap: do_multiline_reformat = True if len(import_statement) > config.line_length", "[] as_imports = any((module in parsed.as_map[\"straight\"] for module in straight_modules))", "config, ) from_imports = [ from_import for from_import in from_imports", "-> str: \"\"\"Adds the imports back to the file. 
(at", "= star_modules + other_modules straight_imports = _with_straight_imports( parsed, config, straight_modules,", ") -> str: \"\"\"Adds the imports back to the file.", "from_import in as_imports ] only_show_as_imports = True elif config.force_single_line and", "imports_tail + 1] ] == [\"\"]: formatted_output.pop(imports_tail) if len(formatted_output) >", "import_line in import_statement.split(parsed.line_separator) ) > config.line_length ): import_statement = other_import_statement", "if config.combine_straight_imports and not as_imports: if not straight_modules: return []", ") for as_import in sorting.sort(config, as_imports[from_import]) ) else: output.extend( with_comments(", "if config.no_sections: parsed.imports[\"no_sections\"] = {\"straight\": {}, \"from\": {}} base_sections: Tuple[str,", "remove_imports, import_type ) from_imports = _with_from_imports( parsed, config, from_modules, section,", "f\"{comments and ';' or config.comment_prefix} \" f\"{comment}\" ) if from_import", "lines.pop(-1) lines.append(\"\") return lines class _LineWithComments(str): comments: List[str] def __new__(", "not from_import_section: import_statement = \"\" do_multiline_reformat = False force_grid_wrap =", "pending_lines_before = False for section in sections: straight_modules = parsed.imports[section][\"straight\"]", "config.only_sections: from_modules = sorting.sort( config, from_modules, key=lambda key: sorting.module_key(key, config,", "[] else: use_comments = comments comments = None single_import_line =", "] for from_import, sub_module in zip(from_imports, sub_modules) if sub_module in", ") -> List[str]: output: List[str] = [] as_imports = any((module", "parsed.line_separator, config ), removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for as_import in as_imports[from_import]", "comments=comments, line_separator=parsed.line_separator, config=config, multi_line_output=wrap.Modes.VERTICAL_GRID, # type: ignore ) if (", "import parse, sorting, wrap from .comments import add_to_line as with_comments", "key=lambda key: sorting.module_key( key, config, True, config.force_alphabetical_sort_within_sections, section_name=section, ), reverse=config.reverse_sort,", "in copy.copy(from_imports): comment = ( parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import, None) ) if", "import DEFAULT_CONFIG, Config def sorted_imports( parsed: parse.ParsedContent, config: Config =", "if not config.only_sections: straight_modules = sorting.sort( config, straight_modules, key=lambda key:", "= False force_grid_wrap = config.force_grid_wrap if force_grid_wrap and len(from_import_section) >=", ") from_imports = [ from_import for from_import in from_imports if", "= [] new_section_output: List[str] = [] for line in section_output:", "output[0].strip() == \"\": output.pop(0) if config.formatting_function: output = config.formatting_function( parsed.line_separator.join(output),", "None) while from_imports: if above_comments: output.extend(above_comments) above_comments = None if", "= sorting.sort( config, straight_modules, key=lambda key: sorting.module_key( key, config, section_name=section,", "+ from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ), parsed.line_separator, config, ) ) from_comments", "= wrap.import_statement( import_start=import_start, from_imports=from_import_section, comments=comments, line_separator=parsed.line_separator, config=config, ) if config.multi_line_output", "the index of the first import) sorted alphabetically and split", 
"formatted_output[imports_tail:] for index, line in enumerate(tail): # pragma: no branch", "= sorting.sort(config, as_imports[as_import]) for from_import in copy.copy(from_imports): if from_import in", "from_imports = [ from_import for from_import in from_imports if from_import", "combine_straight_imports only works for bare imports, 'as' imports not included", "formatted_output[output_at:0] = output if output: imports_tail = output_at + len(output)", "section_title not in seen_headings: if config.dedup_headings: seen_headings.add(section_title) section_comment = f\"#", "if from_modules and straight_modules else 0 ) if config.from_first: section_output", "section_output: if section_name in parsed.place_imports: parsed.place_imports[section_name] = section_output continue section_title", "from_imports if from_import in as_imports ] only_show_as_imports = True elif", "= (comments or []) + list( parsed.categorized_comments[\"from\"].pop(f\"{module}.__combined_as__\", ()) ) import_statement", "= from_imports.index(from_import) if parsed.imports[section][\"from\"][module][from_import]: from_imports[(idx + 1) : (idx +", "other_modules = [] for module in from_modules: if \"*\" in", "-> bool: return line.startswith(\"#\") if line else False for line,", "= True if do_multiline_reformat: import_statement = wrap.import_statement( import_start=import_start, from_imports=from_import_section, comments=comments,", "\"\" and not is_comment(prev_line): new_output.append(\"\") new_output.append(line) return new_output def _with_star_comments(parsed:", "long AND have imports AND we are # NOT using", "removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) comment = ( parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import, None) )", "[f\"{module}.{from_import}\" for from_import in from_imports] as_imports = { from_import: [", "== \"\": output.pop(0) if config.formatting_function: output = config.formatting_function( parsed.line_separator.join(output), extension,", "from_imports[idx : (idx + 1)] = as_imports.pop(from_import) only_show_as_imports = False", "key, config, True, config.force_alphabetical_sort_within_sections, section_name=section, ), reverse=config.reverse_sort, ) if remove_imports:", "f\"{module}.{line}\" not in remove_imports ] sub_modules = [f\"{module}.{from_import}\" for from_import", "output_at = 0 if parsed.import_index < parsed.original_line_count: output_at = parsed.import_index", "index of the first import) sorted alphabetically and split between", "sorting.sort( config, from_modules, key=lambda key: sorting.module_key(key, config, section_name=section), reverse=config.reverse_sort, )", "(\"*\" in from_imports and config.combine_star): if not config.no_inline_sort: for as_import", "config.single_line_exclusions) ) and not config.only_sections: from_imports = sorting.sort( config, from_imports,", "sorting.sort(config, as_imports[as_import]) for from_import in copy.copy(from_imports): if from_import in as_imports:", "in as_imports: idx = from_imports.index(from_import) if parsed.imports[section][\"from\"][module][from_import]: from_imports[(idx + 1)", "): continue next_construct = line break if in_quote: # pragma:", ") if specific_comment: from_comments.append(specific_comment) output.append( wrap.line( with_comments( from_comments, import_start +", "= [ \"\" for line in range(config.lines_after_imports) ] elif extension", "Optional, Set, Tuple, Type from isort.format import format_simplified from .", 
"parsed.categorized_comments[\"above\"][\"from\"].pop(module, None) while from_imports: if above_comments: output.extend(above_comments) above_comments = None", "+= [\"\"] * config.lines_between_sections output += section_output pending_lines_before = False", "as_imports[from_import] = sorting.sort(config, as_imports[from_import]) from_comments = ( parsed.categorized_comments[\"straight\"].get(f\"{module}.{from_import}\") or []", "= _ensure_newline_before_comment(output) while output and output[-1].strip() == \"\": output.pop() #", "if ( max( len(import_line) for import_line in import_statement.split(parsed.line_separator) ) >", "If line too long AND have imports AND we are", "from_import = from_imports.pop(0) single_import_line = with_comments( comments, import_start + from_import,", "= section_name in config.no_lines_before if section_output: if section_name in parsed.place_imports:", "[] for module in from_modules: if \"*\" in parsed.imports[section][\"from\"][module]: star_modules.append(module)", "config.line_length and len(from_import_section) > 0 and config.multi_line_output not in (wrap.Modes.GRID,", "parsed.line_separator.join(output), extension, config ).splitlines() output_at = 0 if parsed.import_index <", "= () for section in sections: if section == \"FUTURE\":", "straight_modules = sorting.sort( config, straight_modules, key=lambda key: sorting.module_key( key, config,", "not (\"*\" in from_imports and config.combine_star): if not config.no_inline_sort: for", "= new_out_lines return _output_as_string(formatted_output, parsed.line_separator) def _with_from_imports( parsed: parse.ParsedContent, config:", "parsed.imports[section][\"straight\"] if not config.only_sections: straight_modules = sorting.sort( config, straight_modules, key=lambda", "else: section_output = straight_imports + lines_between + from_imports if config.force_sort_within_sections:", "imports AND we are # NOT using GRID or VERTICAL", "from_import, sub_module in zip(from_imports, sub_modules) if sub_module in parsed.as_map[\"from\"] }", "or formatted_output[index + 1].strip() != \"\" ): new_out_lines.append(\"\") formatted_output =", "single_import_line = with_comments( comments, import_start + from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, )", "def _output_as_string(lines: List[str], line_separator: str) -> str: return line_separator.join(_normalize_empty_lines(lines)) def", "config.combine_straight_imports and not as_imports: if not straight_modules: return [] above_comments:", "extension != \"pyi\" and next_construct.startswith(STATEMENT_DECLARATIONS): formatted_output[imports_tail:0] = [\"\", \"\"] else:", "with_comments from .identify import STATEMENT_DECLARATIONS from .settings import DEFAULT_CONFIG, Config", "with_comments( from_comments, import_start + from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ), parsed.line_separator, config,", "()) above_comments = parsed.categorized_comments[\"above\"][\"from\"].pop(module, None) while from_imports: if above_comments: output.extend(above_comments)", ": (idx + 1)] = as_imports.pop(from_import) else: from_imports[idx : (idx", "= parsed.lines_without_imports.copy() remove_imports = [format_simplified(removal) for removal in config.remove_imports] sections:", "line_separator=parsed.line_separator, config=config, multi_line_output=wrap.Modes.VERTICAL_GRID, # type: ignore ) if ( max(", "wrap.line( with_comments( from_comments, import_start + as_import, removed=config.ignore_comments, 
comment_prefix=config.comment_prefix, ), parsed.line_separator,", "\"*\" in from_imports: output.append( with_comments( _with_star_comments(parsed, module, []), f\"{import_start}*\", removed=config.ignore_comments,", "= base_sections + (\"no_sections\",) output: List[str] = [] seen_headings: Set[str]", "= set() pending_lines_before = False for section in sections: straight_modules", "not in seen_headings: if config.dedup_headings: seen_headings.add(section_title) section_comment = f\"# {section_title}\"", "= [] if \"*\" in from_imports: output.append( with_comments( _with_star_comments(parsed, module,", "in remove_imports: continue import_start = f\"from {module} {import_type} \" from_imports", "inline_comments: List[str] = [] for module in straight_modules: if module", "comments_above = [] new_section_output: List[str] = [] for line in", "no branch next_construct = line break if config.lines_after_imports != -1:", "works for bare imports, 'as' imports not included if config.combine_straight_imports", "-> str: return line_separator.join(_normalize_empty_lines(lines)) def _normalize_empty_lines(lines: List[str]) -> List[str]: while", "Config def sorted_imports( parsed: parse.ParsedContent, config: Config = DEFAULT_CONFIG, extension:", "module in parsed.categorized_comments[\"above\"][\"straight\"]: above_comments.extend(parsed.categorized_comments[\"above\"][\"straight\"].pop(module)) if module in parsed.categorized_comments[\"straight\"]: inline_comments.extend(parsed.categorized_comments[\"straight\"][module]) combined_straight_imports", "if config.combine_as_imports and not (\"*\" in from_imports and config.combine_star): if", "remove_imports: continue import_start = f\"from {module} {import_type} \" from_imports =", "if parsed.place_imports: new_out_lines = [] for index, line in enumerate(formatted_output):", "from_imports[0] not in as_imports or ( config.combine_as_imports and parsed.imports[section][\"from\"][module][from_import] )", "(wrap.Modes.GRID, wrap.Modes.VERTICAL) # type: ignore ): do_multiline_reformat = True if", "+ list( parsed.categorized_comments[\"from\"].pop(f\"{module}.__combined_as__\", ()) ) import_statement = with_comments( comments, import_start", "straight_modules = parsed.imports[section][\"straight\"] if not config.only_sections: straight_modules = sorting.sort( config,", "if ( not config.no_inline_sort or (config.force_single_line and module not in", "in as_imports: if not config.only_sections: as_imports[as_import] = sorting.sort(config, as_imports[as_import]) for", "line.strip().startswith(\"#\") and len(tail) > (index + 1) and tail[index +", "no_lines_before: output += [\"\"] * config.lines_between_sections output += section_output pending_lines_before", "branch should_skip, in_quote, *_ = parse.skip_line( line, in_quote=\"\", index=len(formatted_output), section_comments=config.section_comments,", "_normalize_empty_lines(lines: List[str]) -> List[str]: while lines and lines[-1].strip() == \"\":", "= comments return instance def _ensure_newline_before_comment(output: List[str]) -> List[str]: new_output:", "key, config, section_name=section, straight_import=True ), reverse=config.reverse_sort, ) from_modules = parsed.imports[section][\"from\"]", "AND have imports AND we are # NOT using GRID", "for module in from_modules: if \"*\" in parsed.imports[section][\"from\"][module]: star_modules.append(module) else:", "parse.skip_line( line, in_quote=\"\", index=len(formatted_output), section_comments=config.section_comments, needs_import=False, ) if not 
should_skip", "= f\"from {module} {import_type} \" from_imports = list(parsed.imports[section][\"from\"][module]) if (", "formatted_output[imports_tail:0] = [ \"\" for line in range(config.lines_after_imports) ] elif", "\"\" ): new_out_lines.append(\"\") formatted_output = new_out_lines return _output_as_string(formatted_output, parsed.line_separator) def", "in straight_modules)) # combine_straight_imports only works for bare imports, 'as'", "continue import_definition = [] if module in parsed.as_map[\"straight\"]: if parsed.imports[section][\"straight\"][module]:", "+ output): # type: ignore if is_comment(line) and prev_line !=", "> 1: do_multiline_reformat = True # If line too long", "file. (at the index of the first import) sorted alphabetically", "), removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for as_import in sorting.sort(config, as_imports[from_import]) )", "Config = DEFAULT_CONFIG, extension: str = \"py\", import_type: str =", "len(import_statement) > config.line_length and len(from_import_section) > 1: do_multiline_reformat = True", "== \"FUTURE\": base_sections = (\"FUTURE\",) continue parsed.imports[\"no_sections\"][\"straight\"].update( parsed.imports[section].get(\"straight\", {}) )", "key: sorting.module_key(key, config, section_name=section), reverse=config.reverse_sort, ) if config.star_first: star_modules =", "True, config.force_alphabetical_sort_within_sections, section_name=section, ), reverse=config.reverse_sort, ) if remove_imports: from_imports =", "if config.multi_line_output == wrap.Modes.GRID: # type: ignore other_import_statement = wrap.import_statement(", "output.pop() # pragma: no cover while output and output[0].strip() ==", "= [] if module in parsed.as_map[\"straight\"]: if parsed.imports[section][\"straight\"][module]: import_definition.append((f\"{import_type} {module}\",", "List[str] = [] inline_comments: List[str] = [] for module in", "list( parsed.categorized_comments[\"from\"].pop(f\"{module}.__combined_as__\", ()) ) import_statement = with_comments( comments, import_start +", "not config.only_sections: from_modules = sorting.sort( config, from_modules, key=lambda key: sorting.module_key(key,", "\").join(from_import_section), removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) if not from_import_section: import_statement = \"\"", "= True if len(import_statement) > config.line_length and len(from_import_section) > 1:", ") if not config.only_sections: output.extend( with_comments( from_comments, wrap.line( import_start +", "f\"{comment}\" ) output.append(wrap.line(single_import_line, parsed.line_separator, config)) from_import_section = [] while from_imports", "+= section_output pending_lines_before = False else: pending_lines_before = pending_lines_before or", "if parsed.import_index == -1: return _output_as_string(parsed.lines_without_imports, parsed.line_separator) formatted_output: List[str] =", "force_grid_wrap and len(from_import_section) >= force_grid_wrap: do_multiline_reformat = True if len(import_statement)", "== \"\": lines.pop(-1) lines.append(\"\") return lines class _LineWithComments(str): comments: List[str]", "[ from_import for from_import in from_imports if from_import in as_imports", "section_name=section), reverse=config.reverse_sort, ) if config.star_first: star_modules = [] other_modules =", "if line.startswith(\"#\"): comments_above.append(line) elif comments_above: new_section_output.append(_LineWithComments(line, comments_above)) comments_above = []", "else: 
output.append(wrap.line(single_import_line, parsed.line_separator, config)) comments = None else: while from_imports", "{combined_straight_imports}\") return output for module in straight_modules: if module in", ") if ( max( len(import_line) for import_line in import_statement.split(parsed.line_separator) )", "if output: imports_tail = output_at + len(output) while [ character.strip()", "from_imports.remove(\"*\") for from_import in copy.copy(from_imports): comment = ( parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import,", "= \"\" do_multiline_reformat = False force_grid_wrap = config.force_grid_wrap if force_grid_wrap", "type: ignore if is_comment(line) and prev_line != \"\" and not", "config.import_headings.get(section_name.lower(), \"\") if section_title and section_title not in seen_headings: if", "( from_imports[0] not in as_imports or ( config.combine_as_imports and parsed.imports[section][\"from\"][module][from_import]", "as_import, parsed.line_separator, config ), removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for as_import in", "comments comments = None single_import_line = with_comments( use_comments, import_start +", "()) ) import_statement = with_comments( comments, import_start + (\", \").join(from_import_section),", "{as_module}\" for as_module in parsed.as_map[\"from\"][sub_module] ] for from_import, sub_module in", "copy.copy(from_imports): comment = ( parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import, None) ) if comment:", "';' or config.comment_prefix} \" f\"{comment}\" ) output.append(wrap.line(single_import_line, parsed.line_separator, config)) from_import_section", "if module in parsed.categorized_comments[\"straight\"]: inline_comments.extend(parsed.categorized_comments[\"straight\"][module]) combined_straight_imports = \", \".join(straight_modules) if", "f\"{comment}\" ) if from_import in as_imports: if ( parsed.imports[section][\"from\"][module][from_import] and", "for as_import in as_imports: if not config.only_sections: as_imports[as_import] = sorting.sort(config,", "from .identify import STATEMENT_DECLARATIONS from .settings import DEFAULT_CONFIG, Config def", ") -> List[str]: output: List[str] = [] for module in", "import_start + from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ), parsed.line_separator, config, ) )", "[ \"\" for line in range(config.lines_after_imports) ] elif extension !=", "straight_imports + lines_between + from_imports if config.force_sort_within_sections: # collapse comments", "parsed.line_separator, config)) comments = None else: while from_imports and from_imports[0]", "_with_star_comments(parsed, module, []), f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) ) from_imports.remove(\"*\") for", "parsed.lines_without_imports.copy() remove_imports = [format_simplified(removal) for removal in config.remove_imports] sections: Iterable[str]", "reverse=config.reverse_sort, ) if remove_imports: from_imports = [ line for line", "for from_import in from_imports] as_imports = { from_import: [ f\"{from_import}", "imports not included if config.combine_straight_imports and not as_imports: if not", "): output.append( wrap.line(single_import_line, parsed.line_separator, config) ) from_comments = parsed.categorized_comments[\"straight\"].get( f\"{module}.{from_import}\"", "Iterable, List, Optional, Set, Tuple, Type from isort.format import format_simplified", "[] seen_headings: Set[str] = set() 
pending_lines_before = False for section", "_LineWithComments(str): comments: List[str] def __new__( cls: Type[\"_LineWithComments\"], value: Any, comments:", ") if ( parsed.imports[section][\"from\"][module][from_import] and not only_show_as_imports ): specific_comment =", "import_type: str = \"import\", ) -> str: \"\"\"Adds the imports", "List[str]: star_comment = parsed.categorized_comments[\"nested\"].get(module, {}).pop(\"*\", None) if star_comment: return comments", "if module in parsed.categorized_comments[\"above\"][\"straight\"]: above_comments.extend(parsed.categorized_comments[\"above\"][\"straight\"].pop(module)) if module in parsed.categorized_comments[\"straight\"]: inline_comments.extend(parsed.categorized_comments[\"straight\"][module])", "if in_quote: # pragma: no branch next_construct = line break", "single_import_line += ( f\"{use_comments and ';' or config.comment_prefix} \" f\"{comment}\"", "False else: pending_lines_before = pending_lines_before or not no_lines_before if config.ensure_newline_before_comments:", "line in from_imports if f\"{module}.{line}\" not in remove_imports ] sub_modules", "reverse=config.reverse_sort, ) from_modules = parsed.imports[section][\"from\"] if not config.only_sections: from_modules =", "new_output def _with_star_comments(parsed: parse.ParsedContent, module: str, comments: List[str]) -> List[str]:", "section_output.append(str(line)) section_name = section no_lines_before = section_name in config.no_lines_before if", "output): # type: ignore if is_comment(line) and prev_line != \"\"", "List[str] = [] def is_comment(line: Optional[str]) -> bool: return line.startswith(\"#\")", "= None single_import_line = with_comments( use_comments, import_start + from_import, removed=config.ignore_comments,", "output += [\"\"] * config.lines_between_sections output += section_output pending_lines_before =", "from_imports] as_imports = { from_import: [ f\"{from_import} as {as_module}\" for", "= \"\" while from_imports: from_import = from_imports.pop(0) single_import_line = with_comments(", "straight_modules, key=lambda key: sorting.module_key( key, config, section_name=section, straight_import=True ), reverse=config.reverse_sort,", "lines[-1].strip() == \"\": lines.pop(-1) lines.append(\"\") return lines class _LineWithComments(str): comments:", "do_multiline_reformat = False force_grid_wrap = config.force_grid_wrap if force_grid_wrap and len(from_import_section)", "in parsed.imports[section][\"from\"][module]: star_modules.append(module) else: other_modules.append(module) from_modules = star_modules + other_modules", "groups \"\"\" if parsed.import_index == -1: return _output_as_string(parsed.lines_without_imports, parsed.line_separator) formatted_output:", "as_module in parsed.as_map[\"from\"][sub_module] ] for from_import, sub_module in zip(from_imports, sub_modules)", "-> List[str]: output: List[str] = [] as_imports = any((module in", "True if do_multiline_reformat: import_statement = wrap.import_statement( import_start=import_start, from_imports=from_import_section, comments=comments, line_separator=parsed.line_separator,", "from_imports.index(from_import) if parsed.imports[section][\"from\"][module][from_import]: from_imports[(idx + 1) : (idx + 1)]", "straight_modules: if module in remove_imports: continue import_definition = [] if", "# type: ignore if is_comment(line) and prev_line != \"\" and", "DEFAULT_CONFIG, extension: str = \"py\", import_type: str = \"import\", )", "new_section_output = sorting.sort( config, new_section_output, 
key=partial(sorting.section_key, config=config), reverse=config.reverse_sort, ) #", "parsed.import_index < parsed.original_line_count: output_at = parsed.import_index formatted_output[output_at:0] = output if", "len(from_import_section) > 0 and config.multi_line_output not in (wrap.Modes.GRID, wrap.Modes.VERTICAL) #", "in from_imports if from_import in as_imports ] only_show_as_imports = True", "output += section_output pending_lines_before = False else: pending_lines_before = pending_lines_before", "import itertools from functools import partial from typing import Any,", "comments = (comments or []) + list( parsed.categorized_comments[\"from\"].pop(f\"{module}.__combined_as__\", ()) )", "not is_comment(prev_line): new_output.append(\"\") new_output.append(line) return new_output def _with_star_comments(parsed: parse.ParsedContent, module:", "comments: List[str] ) -> \"_LineWithComments\": instance = super().__new__(cls, value) instance.comments", "from_comments, wrap.line( import_start + as_import, parsed.line_separator, config ), removed=config.ignore_comments, comment_prefix=config.comment_prefix,", "parsed.as_map[\"straight\"]: if parsed.imports[section][\"straight\"][module]: import_definition.append((f\"{import_type} {module}\", module)) import_definition.extend( (f\"{import_type} {module} as", "line.startswith(\"#\"): comments_above.append(line) elif comments_above: new_section_output.append(_LineWithComments(line, comments_above)) comments_above = [] else:", "as_imports = { from_import: [ f\"{from_import} as {as_module}\" for as_module", "+ straight_imports else: section_output = straight_imports + lines_between + from_imports", "( parsed.categorized_comments[\"nested\"] .get(module, {}) .pop(as_import, None) ) if specific_comment: from_comments.append(specific_comment)", "as_imports = any((module in parsed.as_map[\"straight\"] for module in straight_modules)) #", "new_output.append(line) return new_output def _with_star_comments(parsed: parse.ParsedContent, module: str, comments: List[str])", "use_comments = comments comments = None single_import_line = with_comments( use_comments,", "for module in straight_modules: if module in remove_imports: continue import_definition", "def _ensure_newline_before_comment(output: List[str]) -> List[str]: new_output: List[str] = [] def", "= section no_lines_before = section_name in config.no_lines_before if section_output: if", "to the file. (at the index of the first import)", "len(import_statement) > config.line_length: import_statement = wrap.line(import_statement, parsed.line_separator, config) if import_statement:", "new_section_output: List[str] = [] for line in section_output: if not", "-> List[str]: output: List[str] = [] for module in from_modules:", "+ lines_between + straight_imports else: section_output = straight_imports + lines_between", "> imports_tail: next_construct = \"\" tail = formatted_output[imports_tail:] for index,", "import partial from typing import Any, Iterable, List, Optional, Set,", "list(parsed.imports[section][\"from\"][module]) if ( not config.no_inline_sort or (config.force_single_line and module not", "if section_title and section_title not in seen_headings: if config.dedup_headings: seen_headings.add(section_title)", "section, remove_imports, import_type ) from_imports = _with_from_imports( parsed, config, from_modules,", "\"\"\"Adds the imports back to the file. 
(at the index", "= _with_from_imports( parsed, config, from_modules, section, remove_imports, import_type ) lines_between", "-> List[str]: while lines and lines[-1].strip() == \"\": lines.pop(-1) lines.append(\"\")", "for from_import in copy.copy(from_imports): if from_import in as_imports: idx =", "and ( from_imports[0] not in as_imports or ( config.combine_as_imports and", ") -> \"_LineWithComments\": instance = super().__new__(cls, value) instance.comments = comments", "[ character.strip() for character in formatted_output[imports_tail : imports_tail + 1]", "if from_imports: use_comments = [] else: use_comments = comments comments", "for index, line in enumerate(formatted_output): new_out_lines.append(line) if line in parsed.import_placements:", "in import_definition ) return output def _output_as_string(lines: List[str], line_separator: str)", "new_output.append(\"\") new_output.append(line) return new_output def _with_star_comments(parsed: parse.ParsedContent, module: str, comments:", ") for as_import in as_imports[from_import] ) else: output.append(wrap.line(single_import_line, parsed.line_separator, config))", "1: do_multiline_reformat = True # If line too long AND", "in from_modules: if \"*\" in parsed.imports[section][\"from\"][module]: star_modules.append(module) else: other_modules.append(module) from_modules", "def __new__( cls: Type[\"_LineWithComments\"], value: Any, comments: List[str] ) ->", "parsed.line_separator, config) if import_statement: output.append(import_statement) return output def _with_straight_imports( parsed:", "[\"\"] * config.lines_between_sections output += section_output pending_lines_before = False else:", "= [ line for line in from_imports if f\"{module}.{line}\" not", "output.pop(0) if config.formatting_function: output = config.formatting_function( parsed.line_separator.join(output), extension, config ).splitlines()", "if combined_inline_comments: output.append( f\"{import_type} {combined_straight_imports} # {combined_inline_comments}\" ) else: output.append(f\"{import_type}", "{\"straight\": {}, \"from\": {}} base_sections: Tuple[str, ...] 
= () for", "f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) ) from_imports.remove(\"*\") for from_import in copy.copy(from_imports):", "output: List[str] = [] as_imports = any((module in parsed.as_map[\"straight\"] for", "removed=config.ignore_comments, comment_prefix=config.comment_prefix, ), parsed.line_separator, config, ) ) from_comments = []", "if sub_module in parsed.as_map[\"from\"] } if config.combine_as_imports and not (\"*\"", "parse.ParsedContent, config: Config = DEFAULT_CONFIG, extension: str = \"py\", import_type:", "combined_inline_comments = \" \".join(inline_comments) else: combined_inline_comments = \"\" output.extend(above_comments) if", "and not only_show_as_imports ): output.append( wrap.line(single_import_line, parsed.line_separator, config) ) from_comments", ") ) from_comments = [] for as_import in as_imports[from_import]: specific_comment", "return _output_as_string(formatted_output, parsed.line_separator) def _with_from_imports( parsed: parse.ParsedContent, config: Config, from_modules:", "= ( parsed.categorized_comments[\"nested\"] .get(module, {}) .pop(as_import, None) ) if specific_comment:", "line in section_output: if not line: continue if line.startswith(\"#\"): comments_above.append(line)", "in parsed.as_map[\"from\"][sub_module] ] for from_import, sub_module in zip(from_imports, sub_modules) if", "if force_grid_wrap and len(from_import_section) >= force_grid_wrap: do_multiline_reformat = True if", "+ from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) comment = ( parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import,", "with_comments( use_comments, import_start + from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) single_import_line +=", "if config.combine_as_imports: comments = (comments or []) + list( parsed.categorized_comments[\"from\"].pop(f\"{module}.__combined_as__\",", "as_imports[from_import] ) else: output.append(wrap.line(single_import_line, parsed.line_separator, config)) comments = None else:", "# combine_straight_imports only works for bare imports, 'as' imports not", "as_import in as_imports[from_import] ) else: output.append(wrap.line(single_import_line, parsed.line_separator, config)) comments =", "parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import, None) ) if comment: from_imports.remove(from_import) if from_imports: use_comments", "if line in parsed.import_placements: new_out_lines.extend(parsed.place_imports[parsed.import_placements[line]]) if ( len(formatted_output) <= (index", "Iterable[str], section: str, remove_imports: List[str], import_type: str, ) -> List[str]:", ".get(module, {}) .pop(as_import, None) ) if specific_comment: from_comments.append(specific_comment) output.append( wrap.line(", "+ 1)] = as_imports.pop(from_import) only_show_as_imports = False comments = parsed.categorized_comments[\"from\"].pop(module,", "import_type: str, ) -> List[str]: output: List[str] = [] as_imports", "specific_comment: from_comments.append(specific_comment) output.append( wrap.line( with_comments( from_comments, import_start + from_import, removed=config.ignore_comments,", "if remove_imports: from_imports = [ line for line in from_imports", "(comments or []) + list( parsed.categorized_comments[\"from\"].pop(f\"{module}.__combined_as__\", ()) ) import_statement =", "__new__( cls: Type[\"_LineWithComments\"], value: Any, comments: List[str] ) -> \"_LineWithComments\":", "\"\": output.pop(0) if 
config.formatting_function: output = config.formatting_function( parsed.line_separator.join(output), extension, config", "Type[\"_LineWithComments\"], value: Any, comments: List[str] ) -> \"_LineWithComments\": instance =", "= [] other_modules = [] for module in from_modules: if", "config=config, multi_line_output=wrap.Modes.VERTICAL_GRID, # type: ignore ) if ( max( len(import_line)", "new_out_lines return _output_as_string(formatted_output, parsed.line_separator) def _with_from_imports( parsed: parse.ParsedContent, config: Config,", "remove_imports = [format_simplified(removal) for removal in config.remove_imports] sections: Iterable[str] =", "section_name = section no_lines_before = section_name in config.no_lines_before if section_output:", "from_import for from_import in from_imports if from_import in as_imports ]", "parsed: parse.ParsedContent, config: Config, straight_modules: Iterable[str], section: str, remove_imports: List[str],", "+ 1) and tail[index + 1].strip() ): continue next_construct =", "sorting.sort( config, straight_modules, key=lambda key: sorting.module_key( key, config, section_name=section, straight_import=True", "not no_lines_before if config.ensure_newline_before_comments: output = _ensure_newline_before_comment(output) while output and", "+= ( f\"{comments and ';' or config.comment_prefix} \" f\"{comment}\" )", "= parse.skip_line( line, in_quote=\"\", index=len(formatted_output), section_comments=config.section_comments, needs_import=False, ) if not", "output.extend(above_comments) above_comments = None if \"*\" in from_imports and config.combine_star:", "comments_above)) comments_above = [] else: new_section_output.append(line) # only_sections options is", "as {as_import}\", f\"{module} as {as_import}\") for as_import in parsed.as_map[\"straight\"][module] )", "uncollapse comments section_output = [] for line in new_section_output: comments", "config.combine_star): if not config.no_inline_sort: for as_import in as_imports: if not", "config.lines_between_types if from_modules and straight_modules else 0 ) if config.from_first:", "= list(parsed.imports[section][\"from\"][module]) if ( not config.no_inline_sort or (config.force_single_line and module", "straight_imports = _with_straight_imports( parsed, config, straight_modules, section, remove_imports, import_type )", "= {\"straight\": {}, \"from\": {}} base_sections: Tuple[str, ...] 
= ()", "parsed.categorized_comments[\"straight\"]: inline_comments.extend(parsed.categorized_comments[\"straight\"][module]) combined_straight_imports = \", \".join(straight_modules) if inline_comments: combined_inline_comments =", "module in remove_imports: continue import_definition = [] if module in", "section_output.insert(0, section_comment) if pending_lines_before or not no_lines_before: output += [\"\"]", "config.multi_line_output == wrap.Modes.GRID: # type: ignore other_import_statement = wrap.import_statement( import_start=import_start,", "return output for module in straight_modules: if module in remove_imports:", "cover while output and output[0].strip() == \"\": output.pop(0) if config.formatting_function:", "new_section_output, key=partial(sorting.section_key, config=config), reverse=config.reverse_sort, ) # uncollapse comments section_output =", "\" f\"{comment}\" ) output.append(wrap.line(single_import_line, parsed.line_separator, config)) from_import_section = [] while", "pending_lines_before or not no_lines_before: output += [\"\"] * config.lines_between_sections output", "line in range(config.lines_after_imports) ] elif extension != \"pyi\" and next_construct.startswith(STATEMENT_DECLARATIONS):", "( not config.no_inline_sort or (config.force_single_line and module not in config.single_line_exclusions)", "for line, prev_line in zip(output, [None] + output): # type:", "or (config.force_single_line and module not in config.single_line_exclusions) ) and not", "import) sorted alphabetically and split between groups \"\"\" if parsed.import_index", "new_output: List[str] = [] def is_comment(line: Optional[str]) -> bool: return", "import_start=import_start, from_imports=from_import_section, comments=comments, line_separator=parsed.line_separator, config=config, multi_line_output=wrap.Modes.VERTICAL_GRID, # type: ignore )", "= None else: while from_imports and from_imports[0] in as_imports: from_import", "sorting.module_key(key, config, section_name=section), reverse=config.reverse_sort, ) if config.star_first: star_modules = []", "and output[-1].strip() == \"\": output.pop() # pragma: no cover while", "else: new_section_output.append(line) # only_sections options is not imposed if force_sort_within_sections", "): new_out_lines.append(\"\") formatted_output = new_out_lines return _output_as_string(formatted_output, parsed.line_separator) def _with_from_imports(", ").splitlines() output_at = 0 if parsed.import_index < parsed.original_line_count: output_at =", "from functools import partial from typing import Any, Iterable, List,", "list(comments or ())), f\"{import_start}*\", removed=config.ignore_comments, comment_prefix=config.comment_prefix, ), parsed.line_separator, config, )", "parsed.categorized_comments[\"straight\"].get(f\"{module}.{from_import}\") or [] ) if ( parsed.imports[section][\"from\"][module][from_import] and not only_show_as_imports", "parsed.categorized_comments[\"straight\"].get(imodule), idef, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for idef, imodule in import_definition", "new_out_lines = [] for index, line in enumerate(formatted_output): new_out_lines.append(line) if", "tail[index + 1].strip() ): continue next_construct = line break if", "imports_tail: next_construct = \"\" tail = formatted_output[imports_tail:] for index, line", "{}).pop(from_import, None) ) if comment: from_imports.remove(from_import) if from_imports: use_comments =", ") ) from_imports.remove(\"*\") for from_import in copy.copy(from_imports): comment = (", 
") ) from_comments = [] if \"*\" in from_imports: output.append(", "config ), removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) for as_import in as_imports[from_import] )", "if parsed.imports[section][\"straight\"][module]: import_definition.append((f\"{import_type} {module}\", module)) import_definition.extend( (f\"{import_type} {module} as {as_import}\",", "pending_lines_before = False else: pending_lines_before = pending_lines_before or not no_lines_before", "if force_sort_within_sections is True new_section_output = sorting.sort( config, new_section_output, key=partial(sorting.section_key,", "Optional[str]) -> bool: return line.startswith(\"#\") if line else False for", "section_output pending_lines_before = False else: pending_lines_before = pending_lines_before or not", "from_imports = list(parsed.imports[section][\"from\"][module]) if ( not config.no_inline_sort or (config.force_single_line and", "str) -> str: return line_separator.join(_normalize_empty_lines(lines)) def _normalize_empty_lines(lines: List[str]) -> List[str]:", "and line.strip(): if ( line.strip().startswith(\"#\") and len(tail) > (index +", "if ( line.strip().startswith(\"#\") and len(tail) > (index + 1) and", "-> List[str]: new_output: List[str] = [] def is_comment(line: Optional[str]) ->", "+ (\", \").join(from_import_section), removed=config.ignore_comments, comment_prefix=config.comment_prefix, ) if not from_import_section: import_statement", "and not as_imports: if not straight_modules: return [] above_comments: List[str]", "comment_prefix=config.comment_prefix, ), parsed.line_separator, config, ) ) from_comments = [] if", "return instance def _ensure_newline_before_comment(output: List[str]) -> List[str]: new_output: List[str] =", "( parsed.imports[section][\"from\"][module][from_import] and not only_show_as_imports ): specific_comment = ( parsed.categorized_comments[\"nested\"]", ") return output def _output_as_string(lines: List[str], line_separator: str) -> str:", "from_import in from_imports if from_import in as_imports ] only_show_as_imports =", "parsed.import_placements: new_out_lines.extend(parsed.place_imports[parsed.import_placements[line]]) if ( len(formatted_output) <= (index + 1) or", "config, from_modules, key=lambda key: sorting.module_key(key, config, section_name=section), reverse=config.reverse_sort, ) if", "do_multiline_reformat = True if len(import_statement) > config.line_length and len(from_import_section) >", "comments comments_above = [] new_section_output: List[str] = [] for line", "for bare imports, 'as' imports not included if config.combine_straight_imports and", "for line in section_output: if not line: continue if line.startswith(\"#\"):", "in parsed.as_map[\"from\"] } if config.combine_as_imports and not (\"*\" in from_imports", "wrap.import_statement( import_start=import_start, from_imports=from_import_section, comments=comments, line_separator=parsed.line_separator, config=config, ) if config.multi_line_output ==", "= \"py\", import_type: str = \"import\", ) -> str: \"\"\"Adds", "value) instance.comments = comments return instance def _ensure_newline_before_comment(output: List[str]) ->", "from_import: [ f\"{from_import} as {as_module}\" for as_module in parsed.as_map[\"from\"][sub_module] ]", "( parsed.imports[section][\"from\"][module][from_import] and not only_show_as_imports ): output.append( wrap.line(single_import_line, parsed.line_separator, config)", "as_imports[from_import]) from_comments = ( 
parsed.categorized_comments[\"straight\"].get(f\"{module}.{from_import}\") or [] ) if (", ") if config.multi_line_output == wrap.Modes.GRID: # type: ignore other_import_statement =", "{combined_straight_imports} # {combined_inline_comments}\" ) else: output.append(f\"{import_type} {combined_straight_imports}\") return output for", "= parsed.imports[section][\"from\"] if not config.only_sections: from_modules = sorting.sort( config, from_modules,", "from_comments, import_start + from_import, removed=config.ignore_comments, comment_prefix=config.comment_prefix, ), parsed.line_separator, config, )", "if parsed.import_index < parsed.original_line_count: output_at = parsed.import_index formatted_output[output_at:0] = output", "parsed.line_separator) formatted_output: List[str] = parsed.lines_without_imports.copy() remove_imports = [format_simplified(removal) for removal", "are # NOT using GRID or VERTICAL wrap modes if", ") comment = ( parsed.categorized_comments[\"nested\"].get(module, {}).pop(from_import, None) ) if comment:", "as_imports ] only_show_as_imports = True elif config.force_single_line and module not", "from_imports.pop(0) if not config.only_sections: as_imports[from_import] = sorting.sort(config, as_imports[from_import]) from_comments =", "zip(from_imports, sub_modules) if sub_module in parsed.as_map[\"from\"] } if config.combine_as_imports and", "= [] for as_import in as_imports[from_import]: specific_comment = ( parsed.categorized_comments[\"nested\"]", "not line: continue if line.startswith(\"#\"): comments_above.append(line) elif comments_above: new_section_output.append(_LineWithComments(line, comments_above))", "as_import in sorting.sort(config, as_imports[from_import]) ) else: output.extend( with_comments( from_comments, wrap.line(", "not only_show_as_imports ): output.append( wrap.line(single_import_line, parsed.line_separator, config) ) from_comments =", "= super().__new__(cls, value) instance.comments = comments return instance def _ensure_newline_before_comment(output:", "import_definition.append((f\"{import_type} {module}\", module)) comments_above = parsed.categorized_comments[\"above\"][\"straight\"].pop(module, None) if comments_above: output.extend(comments_above)", "\"*\" in parsed.imports[section][\"from\"][module]: star_modules.append(module) else: other_modules.append(module) from_modules = star_modules +", "line in enumerate(formatted_output): new_out_lines.append(line) if line in parsed.import_placements: new_out_lines.extend(parsed.place_imports[parsed.import_placements[line]]) if", "config) ) from_comments = parsed.categorized_comments[\"straight\"].get( f\"{module}.{from_import}\" ) if not config.only_sections:", "if config.formatting_function: output = config.formatting_function( parsed.line_separator.join(output), extension, config ).splitlines() output_at", "\"FUTURE\": base_sections = (\"FUTURE\",) continue parsed.imports[\"no_sections\"][\"straight\"].update( parsed.imports[section].get(\"straight\", {}) ) parsed.imports[\"no_sections\"][\"from\"].update(parsed.imports[section].get(\"from\",", "sorting.module_key( key, config, True, config.force_alphabetical_sort_within_sections, section_name=section, ), reverse=config.reverse_sort, ) if", "and len(from_import_section) >= force_grid_wrap: do_multiline_reformat = True if len(import_statement) >", ") lines_between = [\"\"] * ( config.lines_between_types if from_modules and", "module in parsed.as_map[\"straight\"]: if parsed.imports[section][\"straight\"][module]: 
_with_straight_imports(parsed, config, straight_modules, section, remove_imports, import_type) renders the plain "import <module>" statements of a section. With config.combine_straight_imports (bare imports only, no "as" aliases) it flushes any above-module comments first, joins the module names into one f"{import_type} {combined_straight_imports}" line, and appends gathered inline comments after config.comment_prefix. Otherwise each module produces an import_definition list of (statement, module) pairs: a module present in parsed.as_map["straight"] contributes f"{import_type} {module} as {as_import}" entries (plus the plain statement when the module is itself imported directly), comments above it are popped from parsed.categorized_comments["above"]["straight"] and emitted first, and every definition goes out through with_comments() with parsed.categorized_comments["straight"].get(imodule).
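The section-sorting machinery keeps comments attached to the line they precede with a small str subclass that survives almost verbatim in the fragments; reconstructed below (the docstring is mine):

from typing import Any, List, Type


class _LineWithComments(str):
    """A str that carries the comment lines found directly above it."""

    comments: List[str]

    def __new__(
        cls: Type["_LineWithComments"], value: Any, comments: List[str]
    ) -> "_LineWithComments":
        instance = super().__new__(cls, value)
        instance.comments = comments
        return instance

Because it is a plain str, section output containing these sorts like any other list of lines; consumers read the attachment back with getattr(line, "comments", ()) and re-emit the comments just before the line.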
sorted_imports(parsed, config=DEFAULT_CONFIG, extension="py", import_type="import") is the top-level assembler; its docstring reads "Adds the imports back to the file. (at the index of the first import) sorted alphabetically and split between groups". It returns _output_as_string(parsed.lines_without_imports, ...) immediately when parsed.import_index == -1, copies the non-import lines into formatted_output, and builds remove_imports with format_simplified(). config.only_sections folds every section except "FUTURE" into a single "no_sections" bucket. Per section it sorts the straight and from modules with sorting.sort()/sorting.module_key() (honouring reverse_sort, force_sort_within_sections, and star_first, which moves "*"-importing modules ahead), renders them via _with_straight_imports() and _with_from_imports(), separates the two kinds with config.lines_between_types blank lines, and orders them per config.from_first. Leading comment lines are folded onto the following line as _LineWithComments so sorting keeps them attached, then unpacked back via getattr(line, "comments", ()). Section titles from config.import_headings are emitted once each (seen_headings/config.dedup_headings) unless already present at the top of the file; sections named in parsed.place_imports are held back and spliced in next to their placement lines at the end; config.lines_between_sections blank lines separate sections, suppressed for names in config.no_lines_before; and config.ensure_newline_before_comments routes the output through _ensure_newline_before_comment(). The finished block is post-processed by config.formatting_function if set and spliced into formatted_output at parsed.import_index; the tail after the imports is then tidied: stray blank lines are popped, and (skipping comment and quoted tails) two blanks are inserted before a following construct that startswith(STATEMENT_DECLARATIONS), one blank otherwise. The result goes out through _output_as_string(), and _with_star_comments(parsed, module, comments) supplies the comment list attached to a module's star import. ]
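The output helpers at the bottom of the module survive nearly whole; reconstructed below (the trailing-blank-line append in _normalize_empty_lines and the zip() pairing in _ensure_newline_before_comment fill elided tails and are assumptions):

from typing import List, Optional


def _output_as_string(lines: List[str], line_separator: str) -> str:
    return line_separator.join(_normalize_empty_lines(lines))


def _normalize_empty_lines(lines: List[str]) -> List[str]:
    while lines and lines[-1].strip() == "":
        lines.pop(-1)
    lines.append("")  # assumed: finish with exactly one trailing blank line
    return lines


def _ensure_newline_before_comment(output: List[str]) -> List[str]:
    new_output: List[str] = []

    def is_comment(line: Optional[str]) -> bool:
        return line.startswith("#") if line else False

    # Pair each line with its predecessor; insert a blank line whenever a
    # comment directly follows a non-comment line.
    for line, prev_line in zip(output, [None, *output]):
        if is_comment(line) and prev_line is not None and not is_comment(prev_line):
            new_output.append("")
        new_output.append(line)
    return new_output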
[ "in self._data]) with io.open(fname, 'w', newline='', encoding='utf-8') as of: #", "kw = dict( type=TEXT, file_name=TEXT, path=TEXT, mtime=DATETIME, ctime=DATETIME, size=INT )", "= \"Read tags for %d paths out of %d entries.\"", "MediaData._fields: self._data[key].append(None) for tag in self.tags: self._tag_data[tag.name].append(tag.default) index = self._relpath2index[relpath]", "file does not have a 'path' column.\" return False, msg", "your path column matches \" \"the media paths.\") return False,", "[datetime_to_long(x) for x in data['ctime_']] self._data = data self._tag_data =", "&= x <= term.end return result def _search_media(expr, m_key, get_tag):", "import Directory from . import processor logger = logging.getLogger(__name__) if", "query.Not): subquery = list(expr.children())[0] return not _search_media(subquery, m_key, get_tag) else:", "but only the tags, extensions and the other settings of", "keymap['_ctime'] = 'ctime_' keymap['_mtime'] = 'mtime_' for index, (key, m)", "pass msg = \"Read tags for %d paths out of", "isinstance(expr, string_types): return expr in value.lower() else: return expr ==", "set the number of files. It simply cleans up the", "Unknown tags are not added. Parameters ---------- fname : str", "sniffer.has_header(sample) dialect = sniffer.sniff(sample) with io.open(fname, 'r', newline='', encoding='utf-8') as", "index = self._relpath2index[relpath] for i, key in enumerate(MediaData._fields): self._data[key][index] =", "is saved. save_file = Str last_save_time = Str _data =", "0: fname = sanitize_name(self.name) + '.vxn' d = get_project_dir() return", "copy to specified path. \"\"\" fp = open_file(fp, 'wb') tags", "of CRUD interface to the data #### def clean(self): \"\"\"Scan", "= _data[key][last] for key in self._tag_data: _tag_data[key][index] = _tag_data[key][last] last_relpath", "= [(key, self.get(key).to_dict()) for key in self._relpath2index] tags = [(t.name,", "not None: result &= x >= term.start if term.enddate is", "for tag in new.changed: self._tag_data[tag][index] = obj.tags[tag] def _read_version1_media(self, media):", "tag_types) for key, index in self._relpath2index.items(): if _search_media(parsed_q, index, self._get_media_attr):", "+ tags self.update_tags(tags) def update_tags(self, new_tags): old_tags = self.tags new_tag_names", "if 'file_name' not in m: data['file_name'].append(basename(key)) data['mtime_'] = [datetime_to_long(x) for", "processors=processors ) json_tricks.dump(data, fp, compression=True) fp.close() logger.info('Saved project: %s', self.name)", "sure you \" \"save the project.\") return True, msg def", "if expr.fieldname == 'ctime': value = get_tag(m_key, 'ctime_') elif expr.fieldname", "if not has_header: return False, \"The CSV file does not", "relpath2index[x]) for x in relpaths] for relpath, index in sorted(indices,", "os from os.path import (abspath, basename, dirname, exists, expanduser, join,", "return _check_range(get_tag(m_key, attr), expr) else: print(\"Unsupported term: %r\" % expr)", "ValueError: pass msg = \"Read tags for %d paths out", "= dict( string=TEXT, text=TEXT, int=INT, float=FLOAT, bool=BOOLEAN ) for tag", "== 'ctime': value = get_tag(m_key, 'ctime_') elif expr.fieldname == 'mtime':", "open(fname_or_file, mode) def sanitize_name(name): name = name.lower() name = re.sub(r'\\s+',", "'mtime': value = get_tag(m_key, 'mtime_') return _check_date_range(value, expr) elif isinstance(expr,", "x < term.end else: result &= x <= term.end return", "newline='', encoding='utf-8') as fp: reader = csv.reader(fp, 
dialect) next(reader) #", "for x in self.processors] for k, m in media: m['_ctime']", "= datetime.datetime.fromtimestamp(os.stat(path).st_ctime) return dt.ctime() def _get_sample(fname): sample = '' with", "obj, tname, old, new): index = self._relpath2index[obj.relpath] for tag in", "media matches expression. \"\"\" if expr.is_leaf(): if isinstance(expr, query.Term): attr", "%s', fname) has_header, header, dialect = _get_csv_headers(fname) if not has_header:", "\"\"\" self._setup_root() def _scan(dir): for f in dir.files: if not", "_get_sample(fname): sample = '' with io.open(fname, 'r', newline='', encoding='utf-8') as", "relpath, splitext) import re import shutil import sys from traits.api", "error message if any. Note that this only applies tags", "= [datetime_to_long(x) for x in data['ctime_']] self._data = data self._tag_data", "success status and the error message if any. Note that", "get_tag) else: print(\"Unsupported term: %r\" % expr) return False class", "Media) extensions = List(Str) processors = List(processor.FactoryBase) number_of_files = Long", "header for record in reader: total += 1 path =", "tags, extensions and the other settings of the project. This", "in p.processors: proc._done.clear() return p # #### CRUD interface to", "HasTraits, Instance, List, Long, Str) from whoosh import fields, qparser,", "_tag_data = Dict _relpath2index = Dict() _query_parser = Instance(qparser.QueryParser) def", "self._media.values(): for tag in removed: del m.tags[tag.name] for tag in", "directory. This will not clobber existing records but will add", "we still read the old saved files. \"\"\" def _rewrite_dir(state):", "TagInfo(HasTraits): name = Str type = Enum(\"string\", \"text\", \"int\", \"float\",", "only applies tags for column headers with known tags. Unknown", "some media, return a Media instance. \"\"\" if relpath in", "not appear to have a header.\" if 'path' not in", "compression=True) fp.close() logger.info('Saved project: %s', self.name) def scan(self, refresh=False): \"\"\"Find", "in self._relpath2index def keys(self): \"\"\"Return all the keys for the", "x in relpaths] for relpath, index in sorted(indices, reverse=True): last", "m_key, get_tag) if result: break return result elif isinstance(expr, query.Not):", "term.text = type_map[fieldtype](term.text) else: term.text = term.text.lower() elif isinstance(term, query.Phrase):", "[processor.dump(x) for x in self.processors] for k, m in media:", "return state fp = open_file(fp, 'wb') media = [(key, self.get(key).to_dict())", "all the useful metadata. 
Parameters ----------- fname: str: a path", "v in m.items(): data[keymap[k]].append(v) if 'file_name' not in m: data['file_name'].append(basename(key))", "if fp is None: if not exists(self.save_file): return fp =", "in keymap: keymap[k] = k keymap['_ctime'] = 'ctime_' keymap['_mtime'] =", "subquery = list(expr.children())[0] return not _search_media(subquery, m_key, get_tag) else: print(\"Unsupported", "expr.fieldname return _check_value(get_tag(m_key, attr), expr.text) elif isinstance(expr, query.Phrase): attr =", "term.text.lower() elif isinstance(term, query.Phrase): term.words = [x.lower() for x in", "data self._tag_data = tag_data self._relpath2index = relpath2index def _delete_record(self, index,", "i in range(len(self._relpath2index)): line = [] for col in cols:", "'path' not in header: msg = \"The CSV file does", "\"the media paths.\") return False, msg else: msg += (\"\\nPlease", "k keymap['_ctime'] = 'ctime_' keymap['_mtime'] = 'mtime_' for index, (key,", "None: if term.startexcl: result &= x > term.start else: result", "lambda x: x.lower() in TRUE, 'string': lambda x: x, 'text':", "else: data = {} index = self._relpath2index[relpath] for key in", "qparser, query from whoosh.util.times import datetime_to_long, long_to_datetime from .common import", "== 'mtime': value = get_tag(m_key, 'mtime_') return _check_date_range(value, expr) elif", "media): data = self.__data_default() tag_data = self.__tag_data_default() relpath2index = {}", "self.extensions def _get_tag_types(self): result = dict(COMMON_TAGS) result.update(dict((t.name, t.type) for t", "get_tag(m_key, 'mtime_') return _check_date_range(value, expr) elif isinstance(expr, query.NumericRange): attr =", "removed = [] added = [] for tag in new_tags:", "#### End of CRUD interface to the data #### def", "keymap[k] = k keymap['_ctime'] = 'ctime_' keymap['_mtime'] = 'mtime_' for", "sample = _get_sample(fname) sniffer = csv.Sniffer() has_header = sniffer.has_header(sample) dialect", "def import_csv(self, fname): \"\"\"Read tag information from given CSV filename.", "of this project. This does not copy the data but", "data given the media data and tags. Parameters ---------- f:", "not None: if term.startexcl: result &= x > term.start else:", "line.append(elem) writer.writerow(line) def import_csv(self, fname): \"\"\"Read tag information from given", "in MediaData._fields: data[key] = [] return data def __tag_data_default(self): tags", "map[self.type] def open_file(fname_or_file, mode='rb'): if hasattr(fname_or_file, 'read'): return fname_or_file else:", "get_non_existing_filename(join(d, fname)) else: return '' def _update_last_save_time(self): self.last_save_time = get_file_saved_time(self.save_file)", "List(Str) processors = List(processor.FactoryBase) number_of_files = Long # Path where", "%d entries.\" % (count, total) if count == 0 and", "self.__data_default() tag_data = self.__tag_data_default() relpath2index = {} keymap = dict.fromkeys(MediaData._fields)", "Instance, List, Long, Str) from whoosh import fields, qparser, query", "path. \"\"\" fp = open_file(fp, 'wb') tags = [(t.name, t.type)", "= [(t.name, t.type) for t in self.tags] root = _rewrite_dir(self.root.__getstate__())", "relpath2index def _delete_record(self, index, relpath): for key in MediaData._fields: del", "[] for tag in new_tags: if tag.name not in tag_info:", "files that no longer exist. \"\"\" logger.info('Cleaning project: %s', self.name)", "# Path where the project data is saved. 
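The sniffing pair survives nearly whole in the fragments; reconstructed below, with the final return inferred from the call site has_header, header, dialect = _get_csv_headers(fname):

import csv
import io


def _get_sample(fname):
    # The first two lines are enough for csv.Sniffer.
    sample = ''
    with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
        sample += fp.readline() + fp.readline()
    return sample


def _get_csv_headers(fname):
    sample = _get_sample(fname)
    sniffer = csv.Sniffer()
    has_header = sniffer.has_header(sample)
    dialect = sniffer.sniff(sample)
    with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
        reader = csv.reader(fp, dialect)
        header = next(reader)
    return has_header, header, dialect  # inferred return triple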
TagInfo(HasTraits) declares name = Str, type = Enum("string", "text", "int", "float", "bool") and default = Any; __repr__ prints 'TagInfo(%r, %r)' % (self.name, self.type), and _default_default() maps each type to its zero value: {"string": "", "text": "", "int": 0, "float": 0.0, "bool": False}. The query side: _cleanup_query(q, tag_types) walks q.leaves(), coercing query.Term text through type_map = dict(float=FLOAT.from_bytes, int=INT.from_bytes) when the field's tag type is numeric and lower-casing it otherwise, and lower-cases every word of a query.Phrase; _check_value() matches strings by case-insensitive substring and everything else by equality; _check_range() and _check_date_range() apply term.start/term.end (or startdate/enddate) with the startexcl/endexcl exclusivity flags; and _search_media(expr, m_key, get_tag) recursively evaluates a parsed whoosh expression against a single record, printing "Unsupported term: %r" and returning False for anything it does not understand.
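These evaluators are almost fully recoverable; a cleaned reconstruction (the query.DateRange branch is summarized in a comment to keep the sketch self-contained):

from whoosh import query

string_types = (str,)


def _check_value(value, expr):
    # Strings match by case-insensitive substring; everything else by equality.
    if isinstance(expr, string_types):
        return expr in value.lower()
    else:
        return expr == value


def _check_range(x, term):
    result = True
    if term.start is not None:
        result &= (x > term.start) if term.startexcl else (x >= term.start)
    if term.end is not None and result:
        result &= (x < term.end) if term.endexcl else (x <= term.end)
    return result


def _search_media(expr, m_key, get_tag):
    """Given search expression, index to media, and a getter to get the
    attribute, check if the media matches expression."""
    if expr.is_leaf():
        if isinstance(expr, query.Term):
            return _check_value(get_tag(m_key, expr.fieldname), expr.text)
        elif isinstance(expr, query.Phrase):
            return _check_value(get_tag(m_key, expr.fieldname), " ".join(expr.words))
        elif isinstance(expr, query.NumericRange):
            return _check_range(get_tag(m_key, expr.fieldname), expr)
        # (query.DateRange maps ctime/mtime to the ctime_/mtime_ columns and
        #  checks them with an analogous _check_date_range using
        #  startdate/enddate; omitted here for brevity.)
        else:
            print("Unsupported term: %r" % expr)
            return False
    else:
        if isinstance(expr, query.And):
            result = True
            for child in expr.children():
                result &= _search_media(child, m_key, get_tag)
                if not result:
                    break
            return result
        elif isinstance(expr, query.Or):
            result = False
            for child in expr.children():
                result |= _search_media(child, m_key, get_tag)
                if result:
                    break
            return result
        elif isinstance(expr, query.Not):
            subquery = list(expr.children())[0]
            return not _search_media(subquery, m_key, get_tag)
        else:
            print("Unsupported term: %r" % expr)
            return False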
\"\"\" self._setup_root() def _scan(dir):", "[] return data def __tag_data_default(self): tags = {} for key", "def _get_sample(fname): sample = '' with io.open(fname, 'r', newline='', encoding='utf-8')", "fname) has_header, header, dialect = _get_csv_headers(fname) if not has_header: return", "ignore_comments=False ) fp.close() self.name = data.get('name', '') self.description = data.get('description',", "def keys(self): \"\"\"Return all the keys for the media relative", "and total > 0: msg += (\"\\nPlease check that your", "= obj.tags[tag] def _read_version1_media(self, media): data = self.__data_default() tag_data =", "Directory() root.__setstate__(data.get('root')) self.extensions = root.extensions self.root = root self.number_of_files =", "def copy(self): \"\"\"Make a copy of this project. This does", "expr.fieldname text = \" \".join(expr.words) return _check_value(get_tag(m_key, attr), text) elif", "return sample def _get_csv_headers(fname): sample = _get_sample(fname) sniffer = csv.Sniffer()", "added = [] for tag in new_tags: if tag.name not", "data.get('name', '') self.description = data.get('description', '') self.path = data.get('path') self.tags", "self._tag_data[tag.name].append(tag.default) index = self._relpath2index[relpath] for i, key in enumerate(MediaData._fields): self._data[key][index]", "tags.items(): tag_data[tname].append(v) for k, v in m.items(): data[keymap[k]].append(v) if 'file_name'", "the database. \"\"\" relpath2index = self._relpath2index indices = [(x, relpath2index[x])", "import sys from traits.api import (Any, Dict, Enum, HasTraits, Instance,", "'.vxn' d = get_project_dir() return get_non_existing_filename(join(d, fname)) else: return ''", "ext = splitext(basename(fname)) return join(dirname(fname), base + '_a' + ext)", "False for child in expr.children(): result |= _search_media(child, m_key, get_tag)", "path_idx = header.index('path') TRUE = ('1', 't', 'true', 'y', 'yes')", "= dirname(old_save_file) new_save_file = join(old_dir, sanitize_name(name) + '.vxn') if new_save_file", "isinstance(term, query.Term): if isinstance(term.text, (str, unicode, bytes)): fieldtype = tag_types[term.fieldname]", "f in dir.files: if not self.has_media(f.relpath) or refresh: data =", "for column headers with known tags. Unknown tags are not", "in header: msg = \"The CSV file does not have", "(\"\\nPlease check that your path column matches \" \"the media", "return [TagInfo(name='completed', type='bool')] def _save_file_default(self): if len(self.name) > 0: fname", "tags=tags, media=media, root=root, processors=processors ) json_tricks.dump(data, fp, compression=True) fp.close() logger.info('Saved", "cols: if col in data_cols: elem = self._data[col][i] else: elem", "total += 1 path = record[path_idx] rpath = relpath(path, self.path)", "\"bool\": False} return map[self.type] def open_file(fname_or_file, mode='rb'): if hasattr(fname_or_file, 'read'):", "+ '_a' + ext) else: return fname COMMON_TAGS = dict(", "info to a file object \"\"\" if len(self.save_file) > 0:", "(key, m) in enumerate(media): relpath2index[key] = index tags = m.pop('tags')", "in tags.items(): tag_data[tname].append(v) for k, v in m.items(): data[keymap[k]].append(v) if", "traits, copy='deep') # Clear out the _done information from the", "recursively inside the root directory. 
This will not clobber existing", "del self._tag_data[key][index] if relpath in self._media: del self._media[relpath] del self._relpath2index[relpath]", "so if self.tags is set then the # removed tags", "root is None or realpath(root.path) != realpath(path): self.root = Directory(path=path,", "= name.lower() name = re.sub(r'\\s+', '_', name) return re.sub(r'\\W+', '',", "the cached media for m in self._media.values(): for tag in", "break return result elif isinstance(expr, query.Not): subquery = list(expr.children())[0] return", "_cleanup_query(parsed_q, tag_types) for key, index in self._relpath2index.items(): if _search_media(parsed_q, index,", "m_key, get_tag): \"\"\"Given search expression, index to media, and a", "= list(expr.children())[0] return not _search_media(subquery, m_key, get_tag) else: print(\"Unsupported term:", "dir.files: if not self.has_media(f.relpath) or refresh: data = get_media_data(f.path, f.relpath)", "version=1, path=self.path, name=self.name, description=self.description, tags=tags, media=media, root=root, processors=processors ) json_tricks.dump(data,", "not self.has_media(f.relpath) or refresh: data = get_media_data(f.path, f.relpath) self.update(data) for", "keymap = dict.fromkeys(MediaData._fields) for k in keymap: keymap[k] = k", "bool=BOOLEAN ) for tag in self.tags: kw[tag.name] = type_to_field[tag.type] return", "for key in MediaData._fields: _data[key][index] = _data[key][last] for key in", "existing records but will add any new ones. \"\"\" self._setup_root()", "self.number_of_files = len(self._relpath2index) def search(self, q): \"\"\"A generator which yields", "below. self.tags = new_tags # Update the cached media for", "_search_media(expr, m_key, get_tag): \"\"\"Given search expression, index to media, and", "= {} for key in MediaData._fields: data[key] = [] return", "import datetime import io import json_tricks import logging import os", "Dict(Str, Media) extensions = List(Str) processors = List(processor.FactoryBase) number_of_files =", "= self._data[key][index] tags = {} for key in self._tag_data: tags[key]", "Instance(Directory) tags = List(TagInfo) _media = Dict(Str, Media) extensions =", "is set then the # removed tags will not exist", "= new_tags # Update the cached media for m in", "all the keys for the media relative paths.\"\"\" return self._relpath2index.keys()", "result &= x <= term.end return result def _search_media(expr, m_key,", "= get_tag(m_key, 'mtime_') return _check_date_range(value, expr) elif isinstance(expr, query.NumericRange): attr", "clobber existing records but will add any new ones. \"\"\"", "fname) all_keys = ((set(MediaData._fields) | set(self._tag_data.keys())) - set(('ctime_', 'mtime_'))) if", "making sure we still read the old saved files. \"\"\"", "|= _search_media(child, m_key, get_tag) if result: break return result elif", "will not exist in _tag_data causing an error. So we", "Schema kw = dict( type=TEXT, file_name=TEXT, path=TEXT, mtime=DATETIME, ctime=DATETIME, size=INT", "True, msg def load(self, fp=None): \"\"\"Load media info from opened", "= Str _data = Dict _tag_data = Dict _relpath2index =", "_get_media_attr(self, index, attr): \"\"\"Given an index to the media, return", "to write. \"\"\" logger.info('Exporting CSV: %s', fname) all_keys = ((set(MediaData._fields)", "the success status and the error message if any. 
Note", "elif isinstance(expr, query.Not): subquery = list(expr.children())[0] return not _search_media(subquery, m_key,", "= list(self.tags) + tags self.update_tags(tags) def update_tags(self, new_tags): old_tags =", "Str _data = Dict _tag_data = Dict _relpath2index = Dict()", "db of files that no longer exist. \"\"\" logger.info('Cleaning project:", "root.extensions self.root = root self.number_of_files = len(self._relpath2index) def save(self): \"\"\"Save", "print(\"Unsupported term: %r\" % expr) return False else: if isinstance(expr,", "type_to_field = dict( string=TEXT, text=TEXT, int=INT, float=FLOAT, bool=BOOLEAN ) for", "return its value. \"\"\" if attr in self._data: return self._data[attr][index]", "as fp: sample += fp.readline() + fp.readline() return sample def", "%s\" % q) return tag_types = self._get_tag_types() _cleanup_query(parsed_q, tag_types) for", "vixen.directory.File instance tags: dict \"\"\" relpath = media_data.relpath if not", "keys(self): \"\"\"Return all the keys for the media relative paths.\"\"\"", "with known tags. Unknown tags are not added. Parameters ----------", "import_csv(self, fname): \"\"\"Read tag information from given CSV filename. Returns", "= 0 with io.open(fname, 'r', newline='', encoding='utf-8') as fp: reader", "= {} for key in self.tags: tags[key.name] = [] return", "{} for key in self._tag_data: tags[key] = self._tag_data[key][index] media =", "attr = expr.fieldname return _check_range(get_tag(m_key, attr), expr) else: print(\"Unsupported term:", "project: %s', self.name) self.clean() self.scan(refresh=True) # #### Private protocol ################################################", "True if the media data is available. \"\"\" return relpath", "= abspath(expanduser(self.path)) root = self.root if root is None or", "t in self.tags)) return result def _make_schema(self): from whoosh.fields import", "datetime.datetime.fromtimestamp(os.stat(path).st_ctime) return dt.ctime() def _get_sample(fname): sample = '' with io.open(fname,", "key in MediaData._fields: data[key] = self._data[key][index] tags = {} for", "self.tags)) return result def _make_schema(self): from whoosh.fields import BOOLEAN, DATETIME,", "value = get_tag(m_key, 'ctime_') elif expr.fieldname == 'mtime': value =", "if col in data_cols: elem = self._data[col][i] else: elem =", "self._data _tag_data = self._tag_data for key in MediaData._fields: _data[key][index] =", "ones. 
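The core of that loop is recoverable; a sketch in hypothetical free-function form (apply_csv_tags is my name; `project` supplies the .tags, .path, ._relpath2index, ._media and ._tag_data described above):

import csv
import io
from os.path import relpath as _relpath

TRUE = ('1', 't', 'true', 'y', 'yes')


def apply_csv_tags(project, fname, header, dialect):
    tags = {x: header.index(x.name) for x in project.tags if x.name in header}
    path_idx = header.index('path')
    type_map = {
        'bool': lambda x: x.lower() in TRUE,
        'string': lambda x: x,
        'text': lambda x: x,
        'int': int,
        'float': float,
    }
    count = total = 0
    with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
        reader = csv.reader(fp, dialect)
        next(reader)  # skip the header row
        for record in reader:
            total += 1
            rpath = _relpath(record[path_idx], project.path)
            index = project._relpath2index.get(rpath, None)
            media = project._media.get(rpath)
            if index is not None:
                count += 1
                for tag, header_index in tags.items():
                    try:
                        value = type_map[tag.type](record[header_index])
                        if media is not None:
                            media.tags[tag.name] = value  # live Media object
                        else:
                            project._tag_data[tag.name][index] = value
                    except ValueError:
                        pass  # skip unconvertible cells
    return count, total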
\"\"\" self._setup_root() def _scan(dir): for f in dir.files: if", "term.words] def _check_value(value, expr): if isinstance(expr, string_types): return expr in", "= [processor.load(x) for x in data.get('processors', [])] version = data.get('version')", "This does not copy the data but only the tags,", "'true', 'y', 'yes') type_map = { 'bool': lambda x: x.lower()", "# Update the cached media for m in self._media.values(): for", "header_index in tags.items(): data = record[header_index] try: value = type_map[tag.type](data)", "p # #### CRUD interface to the data #### def", "import logging import os from os.path import (abspath, basename, dirname,", "processors = [processor.dump(x) for x in self.processors] data = dict(", "= csv.Sniffer() has_header = sniffer.has_header(sample) dialect = sniffer.sniff(sample) with io.open(fname,", "index = self._relpath2index[obj.relpath] for tag in new.changed: self._tag_data[tag][index] = obj.tags[tag]", "_query_parser = Instance(qparser.QueryParser) def add_tags(self, tags): tags = list(self.tags) +", "elif isinstance(term, query.Phrase): term.words = [x.lower() for x in term.words]", "False, \"The CSV file does not appear to have a", "self._make_query_parser() def copy(self): \"\"\"Make a copy of this project. This", "not None: self.root.extensions = ext def _extensions_items_changed(self): if self.root is", "some media, remove them from the database. \"\"\" relpath2index =", "datetime import io import json_tricks import logging import os from", "Path where the project data is saved. save_file = Str", "new_save_file = join(old_dir, sanitize_name(name) + '.vxn') if new_save_file != old_save_file:", "'') self.description = data.get('description', '') self.path = data.get('path') self.tags =", "if hasattr(fname_or_file, 'read'): return fname_or_file else: return open(fname_or_file, mode) def", "return Schema(**kw) def _make_query_parser(self): schema = self._make_schema() qp = qparser.QueryParser('path',", "copy='deep') # Clear out the _done information from the processors", "for the media relative paths.\"\"\" return self._relpath2index.keys() def _get_media_attr(self, index,", "= len(self._relpath2index) self._relpath2index[relpath] = index for key in MediaData._fields: self._data[key].append(None)", "is not None: count += 1 for tag, header_index in", "def get_non_existing_filename(fname): if exists(fname): base, ext = splitext(basename(fname)) return join(dirname(fname),", "'.vxn') if new_save_file != old_save_file: self.save_file = new_save_file if exists(old_save_file):", "a getter to get the attribute check if the media", "each file satisfying the search query. \"\"\" logger.info('Searching for %s',", "copy to specified path. This mainly exists for testing and", "from .directory import Directory from . 
import processor logger =", "_tags_default(self): return [TagInfo(name='completed', type='bool')] def _save_file_default(self): if len(self.name) > 0:", "for tag in self.tags: kw[tag.name] = type_to_field[tag.type] return Schema(**kw) def", "= Dict _tag_data = Dict _relpath2index = Dict() _query_parser =", "_get_csv_headers(fname): sample = _get_sample(fname) sniffer = csv.Sniffer() has_header = sniffer.has_header(sample)", "tag in old_tags) removed = [] added = [] for", "len(relpath2index) - 1 if index == last: self._delete_record(last, relpath) else:", "\"\"\"Given the relative path of some media, return a Media", "x >= term.start if term.enddate is not None and result:", "Media.from_data(MediaData(**data), tags) media.on_trait_change(self._media_tag_handler, 'tags_items') self._media[relpath] = media return media def", "has_header, header, dialect = _get_csv_headers(fname) if not has_header: return False,", "cols = all_keys cols = list(sorted(cols)) data_cols = set([x for", "get the attribute check if the media matches expression. \"\"\"", "fields.NUMERIC(numtype=float) def get_file_saved_time(path): dt = datetime.datetime.fromtimestamp(os.stat(path).st_ctime) return dt.ctime() def _get_sample(fname):", "of some media, return a Media instance. \"\"\" if relpath", "Input filename. \"\"\" logger.info('Importing tags from: %s', fname) has_header, header,", "try: value = type_map[tag.type](data) if media is not None: media.tags[tag.name]", "self.last_save_time = get_file_saved_time(self.save_file) def _last_save_time_default(self): if exists(self.save_file): return get_file_saved_time(self.save_file) else:", "if term.end is not None and result: if term.endexcl: result", "def _delete_record(self, index, relpath): for key in MediaData._fields: del self._data[key][index]", "media, return a Media instance. \"\"\" if relpath in self._media:", "return get_file_saved_time(self.save_file) else: return '' def _name_changed(self, name): if len(name)", "\".join(expr.words) return _check_value(get_tag(m_key, attr), text) elif isinstance(expr, query.DateRange): if expr.fieldname", "= all_keys cols = list(sorted(cols)) data_cols = set([x for x", "return media def remove(self, relpaths): \"\"\"Given a list of relative", "in self.tags if x.name in header} path_idx = header.index('path') TRUE", "result elif isinstance(expr, query.Or): result = False for child in", "description=self.description, tags=tags, media_data=self._data, tag_data=self._tag_data, relpath2index=self._relpath2index, root=root, processors=processors ) json_tricks.dump(data, fp,", "text = \" \".join(expr.words) return _check_value(get_tag(m_key, attr), text) elif isinstance(expr,", "re.sub(r'\\s+', '_', name) return re.sub(r'\\W+', '', name) def get_non_existing_filename(fname): if", "name = Str type = Enum(\"string\", \"text\", \"int\", \"float\", \"bool\")", "does not appear to have a header.\" if 'path' not", "name = name.lower() name = re.sub(r'\\s+', '_', name) return re.sub(r'\\W+',", "attr = expr.fieldname text = \" \".join(expr.words) return _check_value(get_tag(m_key, attr),", "if any. Note that this only applies tags for column", "a list of relative path of some media, remove them", "index = self._relpath2index[relpath] for key in MediaData._fields: data[key] = self._data[key][index]", "self._relpath2index def keys(self): \"\"\"Return all the keys for the media", "saved. 
save_file = Str last_save_time = Str _data = Dict", "= _get_csv_headers(fname) if not has_header: return False, \"The CSV file", "relpath in self._media: return self._media[relpath] else: data = {} index", "__repr__(self): return 'TagInfo(%r, %r)' % (self.name, self.type) def _default_default(self): map", "elem = self._tag_data[col][i] line.append(elem) writer.writerow(line) def import_csv(self, fname): \"\"\"Read tag", "tags = [(t.name, t.type) for t in self.tags] root =", "value.lower() else: return expr == value def _check_range(x, term): result", "# #### CRUD interface to the data #### def update(self,", "logger.info('Exporting CSV: %s', fname) all_keys = ((set(MediaData._fields) | set(self._tag_data.keys())) -", "get_tag) if result: break return result elif isinstance(expr, query.Not): subquery", "size='int', type='string' ) def _cleanup_query(q, tag_types): type_map = dict(float=FLOAT.from_bytes, int=INT.from_bytes)", "open_file(fp, 'wb') tags = [(t.name, t.type) for t in self.tags]", "expr): if isinstance(expr, string_types): return expr in value.lower() else: return", "= len(relpath2index) - 1 if index == last: self._delete_record(last, relpath)", "0.0, \"bool\": False} return map[self.type] def open_file(fname_or_file, mode='rb'): if hasattr(fname_or_file,", "exists(self.save_file): return fp = open_file(self.save_file, 'rb') else: fp = open_file(fp,", "new project for example. In this case, # self.__tag_data_default is", "_check_range(get_tag(m_key, attr), expr) else: print(\"Unsupported term: %r\" % expr) return", "from whoosh.fields import BOOLEAN, DATETIME, TEXT, Schema kw = dict(", "= _data['relpath'][last] self._relpath2index[last_relpath] = index def _save_as_v1(self, fp): \"\"\"Save copy", "data = dict( version=2, path=self.path, name=self.name, description=self.description, tags=tags, media_data=self._data, tag_data=self._tag_data,", "string_types = (basestring,) import backports.csv as csv INT = fields.NUMERIC(numtype=int)", "def _rewrite_dir(state): \"Rewrite directories in the old format.\" state['files'] =", "result elif isinstance(expr, query.Not): subquery = list(expr.children())[0] return not _search_media(subquery,", "data_cols: elem = self._data[col][i] else: elem = self._tag_data[col][i] line.append(elem) writer.writerow(line)", "tags. Unknown tags are not added. Parameters ---------- fname :", "' copy' p = Project(name=name) traits = ['description', 'extensions', 'path',", "else: return '' def _update_last_save_time(self): self.last_save_time = get_file_saved_time(self.save_file) def _last_save_time_default(self):", "= Instance(qparser.QueryParser) def add_tags(self, tags): tags = list(self.tags) + tags", "the project data is saved. 
save_file = Str last_save_time =", "sys from traits.api import (Any, Dict, Enum, HasTraits, Instance, List,", "media_data.relpath if not self.has_media(relpath): index = len(self._relpath2index) self._relpath2index[relpath] = index", "m: data['file_name'].append(basename(key)) data['mtime_'] = [datetime_to_long(x) for x in data['mtime_']] data['ctime_']", "sample = '' with io.open(fname, 'r', newline='', encoding='utf-8') as fp:", "_rewrite_dir(self.root.__getstate__()) processors = [processor.dump(x) for x in self.processors] for k,", "result: if term.endexcl: result &= x < term.end else: result", "'_', name) return re.sub(r'\\W+', '', name) def get_non_existing_filename(fname): if exists(fname):", "key in MediaData._fields: data[key] = [] return data def __tag_data_default(self):", "_update_last_save_time(self): self.last_save_time = get_file_saved_time(self.save_file) def _last_save_time_default(self): if exists(self.save_file): return get_file_saved_time(self.save_file)", "self.name = data.get('name', '') self.description = data.get('description', '') self.path =", "'' def _update_last_save_time(self): self.last_save_time = get_file_saved_time(self.save_file) def _last_save_time_default(self): if exists(self.save_file):", "_done information from the processors for proc in p.processors: proc._done.clear()", "return dt.ctime() def _get_sample(fname): sample = '' with io.open(fname, 'r',", "enumerate(MediaData._fields): self._data[key][index] = media_data[i] if tags: for key, value in", "self._tag_data: tags[key] = self._tag_data[key][index] media = Media.from_data(MediaData(**data), tags) media.on_trait_change(self._media_tag_handler, 'tags_items')", "def _save_file_default(self): if len(self.name) > 0: fname = sanitize_name(self.name) +", "logging.getLogger(__name__) if sys.version_info[0] > 2: unicode = str string_types =", "break return result elif isinstance(expr, query.Or): result = False for", "the media relative paths.\"\"\" return self._relpath2index.keys() def _get_media_attr(self, index, attr):", "m_key, get_tag) else: print(\"Unsupported term: %r\" % expr) return False", "copy any of the processor states but only their settings.", "_data[key][last] for key in self._tag_data: _tag_data[key][index] = _tag_data[key][last] last_relpath =", "In this case, # self.__tag_data_default is called, so if self.tags", "any new ones. \"\"\" self._setup_root() def _scan(dir): for f in", "columns to write. \"\"\" logger.info('Exporting CSV: %s', fname) all_keys =", "x in data['mtime_']] data['ctime_'] = [datetime_to_long(x) for x in data['ctime_']]", "new_save_file if exists(old_save_file): shutil.move(old_save_file, self.save_file) def _extensions_changed(self, ext): if self.root", "def export_csv(self, fname, cols=None): \"\"\"Export metadata to a csv file.", "v in tags.items(): tag_data[tname].append(v) for k, v in m.items(): data[keymap[k]].append(v)", "file_name='string', path='string', relpath='string', ctime='string', mtime='string', size='int', type='string' ) def _cleanup_query(q,", "when # creating a new project for example. 
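Pieced together from the fragments, the version-2 writer reduces to the sketch below, in free-function form over pre-extracted state (save_project_v2 is my name; fp must already be an open binary file):

import json_tricks


def save_project_v2(fp, *, path, name, description, tags, media_data,
                    tag_data, relpath2index, root_state, processor_states):
    """Write the version-2 on-disk layout that load() above expects."""
    data = dict(
        version=2,
        path=path,
        name=name,
        description=description,
        tags=tags,                    # [(tag_name, tag_type), ...]
        media_data=media_data,        # columnar: field name -> list of values
        tag_data=tag_data,            # columnar: tag name -> list of values
        relpath2index=relpath2index,  # relative path -> row index
        root=root_state,
        processors=processor_states,
    )
    json_tricks.dump(data, fp, compression=True)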
Tag management and CRUD. add_tags(tags) appends to the tag list and delegates to update_tags(new_tags), which diffs old against new by name and type: columns for removed tags are deleted from _tag_data and columns of [tag.default]*n_entries allocated for added ones. A fragment comment records the ordering subtlety: allocating the defaults "can be the first time when self._tag_data is accessed, when creating a new project for example. In this case, self.__tag_data_default is called, so if self.tags is set then the removed tags will not exist in _tag_data causing an error. So we only set self.tags below." After self.tags is assigned, each cached Media has removed tags deleted and added tags defaulted, and _query_parser is rebuilt with _make_query_parser(). The CRUD layer: update(media_data, tags=None) appends a fresh row for an unknown relpath (index = len(self._relpath2index), one None per MediaData._fields, one default per tag column), then writes media_data[i] into each column, applies the tag dict, and update()s a cached Media if one exists. get(relpath) returns the cached Media or materializes one with Media.from_data(MediaData(**data), tags), attaching _media_tag_handler via on_trait_change('tags_items') so edits flow back into _tag_data. has_media(relpath) "Returns True if the media data is available", keys() exposes the relative paths, _get_media_attr(index, attr) reads from _data or _tag_data, and remove(relpaths) deletes records through _delete_record()/_replace_with_last_record().
def _get_sample(fname):
    sample = ''
    with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
        sample += fp.readline() + fp.readline()
    return sample


def _get_csv_headers(fname):
    sample = _get_sample(fname)
    sniffer = csv.Sniffer()
    has_header = sniffer.has_header(sample)
    dialect = sniffer.sniff(sample)
    with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
        reader = csv.reader(fp, dialect)
        header = next(reader)

    return has_header, header, dialect
\"\"\" logger.info('Importing", "'') self.path = data.get('path') self.tags = [TagInfo(name=x[0], type=x[1]) for x", "%s', q) try: parsed_q = self._query_parser.parse(q) except Exception: logger.warn(\"Invalid search", "= [] relpath2index = self._relpath2index for rpath in list(relpath2index.keys()): fname", "in self.tags: tags[key.name] = [] return tags def _media_tag_handler(self, obj,", "\"\"\" def _rewrite_dir(state): \"Rewrite directories in the old format.\" state['files']", "self._setup_root() def _scan(dir): for f in dir.files: if not self.has_media(f.relpath)", "= [datetime_to_long(x) for x in data['mtime_']] data['ctime_'] = [datetime_to_long(x) for", "\"text\": \"\", \"int\": 0, \"float\": 0.0, \"bool\": False} return map[self.type]", "refresh: data = get_media_data(f.path, f.relpath) self.update(data) for d in dir.directories:", "def _check_date_range(x, term): result = True if term.startdate is not", "= \" \".join(expr.words) return _check_value(get_tag(m_key, attr), text) elif isinstance(expr, query.DateRange):", "_last_save_time_default(self): if exists(self.save_file): return get_file_saved_time(self.save_file) else: return '' def _name_changed(self,", "tag_data = self.__tag_data_default() relpath2index = {} keymap = dict.fromkeys(MediaData._fields) for", "term.startdate is not None: result &= x >= term.start if", "last: self._delete_record(last, relpath) else: self._replace_with_last_record(index, last) self._delete_record(last, relpath) def has_media(self,", "tags from: %s', fname) has_header, header, dialect = _get_csv_headers(fname) if", "= 'ctime_' keymap['_mtime'] = 'mtime_' for index, (key, m) in", "in self.processors] data = dict( version=2, path=self.path, name=self.name, description=self.description, tags=tags,", "None and result: if term.endexcl: result &= x < term.end", "Directory from . import processor logger = logging.getLogger(__name__) if sys.version_info[0]", "2: unicode = str string_types = (str,) import csv else:", "def _get_tag_types(self): result = dict(COMMON_TAGS) result.update(dict((t.name, t.type) for t in", "\"\"\" relpath = media_data.relpath if not self.has_media(relpath): index = len(self._relpath2index)", "tags. Parameters ---------- f: vixen.directory.File instance tags: dict \"\"\" relpath", "_get_sample(fname) sniffer = csv.Sniffer() has_header = sniffer.has_header(sample) dialect = sniffer.sniff(sample)", "to specified path. 
This mainly exists for testing and making", "simply cleans up the db of files that no longer", "media = [(key, self.get(key).to_dict()) for key in self._relpath2index] tags =", "[(t.name, t.type) for t in self.tags] root = _rewrite_dir(self.root.__getstate__()) processors", "the data but only the tags, extensions and the other", "long_to_datetime from .common import get_project_dir from .media import Media, MediaData,", "list of relative path of some media, remove them from", "+ fp.readline() return sample def _get_csv_headers(fname): sample = _get_sample(fname) sniffer", "return self._tag_data[attr][index] # #### End of CRUD interface to the", "= self.__data_default() tag_data = self.__tag_data_default() relpath2index = {} keymap =", "the data #### def clean(self): \"\"\"Scan the project and remove", "self.root.refresh() _scan(self.root) self.number_of_files = len(self._relpath2index) def search(self, q): \"\"\"A generator", "True if term.startdate is not None: result &= x >=", "not None: count += 1 for tag, header_index in tags.items():", "join, realpath, relpath, splitext) import re import shutil import sys", "media recursively inside the root directory. This will not clobber", "self.update(data) for d in dir.directories: if refresh: d.refresh() _scan(d) if", "in tag_info: added.append(tag) elif tag_info[tag.name] != tag.type: removed.append(tag) added.append(tag) for", "'path', 'processors', 'tags'] p.copy_traits(self, traits, copy='deep') # Clear out the", "\"The CSV file does not appear to have a header.\"", "CSV file does not have a 'path' column.\" return False,", "= {} for key in self._tag_data: tags[key] = self._tag_data[key][index] media", "records but will add any new ones. \"\"\" self._setup_root() def", "self._tag_data = data['tag_data'] self._relpath2index = data['relpath2index'] root = Directory() root.__setstate__(data.get('root'))", "'' def _name_changed(self, name): if len(name) > 0: old_save_file =", "'w', newline='', encoding='utf-8') as of: # Write the header. writer", "case, # self.__tag_data_default is called, so if self.tags is set", "type=TEXT, file_name=TEXT, path=TEXT, mtime=DATETIME, ctime=DATETIME, size=INT ) type_to_field = dict(", "this only applies tags for column headers with known tags.", "whoosh.fields import BOOLEAN, DATETIME, TEXT, Schema kw = dict( type=TEXT,", "Str type = Enum(\"string\", \"text\", \"int\", \"float\", \"bool\") default =", "_tag_data[key][last] last_relpath = _data['relpath'][last] self._relpath2index[last_relpath] = index def _save_as_v1(self, fp):", "an index to the media, return its value. 
\"\"\" if", "if index is not None: count += 1 for tag,", "Directory(path=path, extensions=self.extensions) def _tags_default(self): return [TagInfo(name='completed', type='bool')] def _save_file_default(self): if", "datetime_to_long, long_to_datetime from .common import get_project_dir from .media import Media,", "save(self): \"\"\"Save current media info to a file object \"\"\"", "last_relpath = _data['relpath'][last] self._relpath2index[last_relpath] = index def _save_as_v1(self, fp): \"\"\"Save", "tname, old, new): index = self._relpath2index[obj.relpath] for tag in new.changed:", "else: return expr == value def _check_range(x, term): result =", "result: break return result elif isinstance(expr, query.Not): subquery = list(expr.children())[0]", "List(processor.FactoryBase) number_of_files = Long # Path where the project data", "= 0 total = 0 with io.open(fname, 'r', newline='', encoding='utf-8')", "self._get_tag_types() _cleanup_query(parsed_q, tag_types) for key, index in self._relpath2index.items(): if _search_media(parsed_q,", "old saved files. \"\"\" def _rewrite_dir(state): \"Rewrite directories in the", "preserve_order=False, ignore_comments=False ) fp.close() self.name = data.get('name', '') self.description =", "self.root is not None: self.root.extensions = ext def _extensions_items_changed(self): if", "= k keymap['_ctime'] = 'ctime_' keymap['_mtime'] = 'mtime_' for index,", "fp = open_file(self.save_file, 'rb') else: fp = open_file(fp, 'rb') data", "+= 1 path = record[path_idx] rpath = relpath(path, self.path) index", "def __query_parser_default(self): return self._make_query_parser() def __data_default(self): data = {} for", "%r\" % expr) return False class Project(HasTraits): name = Str", "#### CRUD interface to the data #### def update(self, media_data,", "file satisfying the search query. \"\"\" logger.info('Searching for %s', q)", "status and the error message if any. Note that this", "example. In this case, # self.__tag_data_default is called, so if", "= self.path to_remove = [] relpath2index = self._relpath2index for rpath", "tags) def get(self, relpath): \"\"\"Given the relative path of some", "exist in _tag_data causing an error. So we only #", "col in cols: if col in data_cols: elem = self._data[col][i]", "re.sub(r'\\W+', '', name) def get_non_existing_filename(fname): if exists(fname): base, ext =", "in list(relpath2index.keys()): fname = os.path.join(root_path, rpath) if not os.path.exists(fname): to_remove.append(rpath)", "that no longer exist. \"\"\" logger.info('Cleaning project: %s', self.name) root_path", "child in expr.children(): result |= _search_media(child, m_key, get_tag) if result:", "relpath, index in sorted(indices, reverse=True): last = len(relpath2index) - 1", "headers with known tags. Unknown tags are not added. 
Parameters", "+ ext) else: return fname COMMON_TAGS = dict( file_name='string', path='string',", "isinstance(expr, query.Phrase): attr = expr.fieldname text = \" \".join(expr.words) return", "import shutil import sys from traits.api import (Any, Dict, Enum,", "'' with io.open(fname, 'r', newline='', encoding='utf-8') as fp: sample +=", "type_map[fieldtype](term.text) else: term.text = term.text.lower() elif isinstance(term, query.Phrase): term.words =", "x <= term.end return result def _check_date_range(x, term): result =", "not self.has_media(relpath): index = len(self._relpath2index) self._relpath2index[relpath] = index for key", "refresh the directory tree or set the number of files.", "x, 'text': lambda x: x, 'int': int, 'float': float }", "self.number_of_files = len(self._relpath2index) def save(self): \"\"\"Save current media info to", "data['ctime_']] self._data = data self._tag_data = tag_data self._relpath2index = relpath2index", "def _last_save_time_default(self): if exists(self.save_file): return get_file_saved_time(self.save_file) else: return '' def", "return join(dirname(fname), base + '_a' + ext) else: return fname", "if not os.path.exists(fname): to_remove.append(rpath) self.remove(to_remove) def export_csv(self, fname, cols=None): \"\"\"Export", "del self._relpath2index[relpath] def _replace_with_last_record(self, index, last): _data = self._data _tag_data", "(self.name, self.type) def _default_default(self): map = {\"string\": \"\", \"text\": \"\",", "= len(self._relpath2index) def search(self, q): \"\"\"A generator which yields the", "(basestring,) import backports.csv as csv INT = fields.NUMERIC(numtype=int) FLOAT =", "a sequence of columns to write. \"\"\" logger.info('Exporting CSV: %s',", "creating a new project for example. In this case, #", "fields, qparser, query from whoosh.util.times import datetime_to_long, long_to_datetime from .common", "self._media: del self._media[relpath] del self._relpath2index[relpath] def _replace_with_last_record(self, index, last): _data", "self._relpath2index.items(): if _search_media(parsed_q, index, self._get_media_attr): yield basename(key), key def refresh(self):", "relpath2index = self._relpath2index for rpath in list(relpath2index.keys()): fname = os.path.join(root_path,", "self.root.extensions = self.extensions def _get_tag_types(self): result = dict(COMMON_TAGS) result.update(dict((t.name, t.type)", "List, Long, Str) from whoosh import fields, qparser, query from", "return expr == value def _check_range(x, term): result = True", "self._data[col][i] else: elem = self._tag_data[col][i] line.append(elem) writer.writerow(line) def import_csv(self, fname):", "'r', newline='', encoding='utf-8') as fp: reader = csv.reader(fp, dialect) header", "clean(self): \"\"\"Scan the project and remove any dead entries. This", "project. This does not copy the data but only the", "path column matches \" \"the media paths.\") return False, msg", "= len(self._relpath2index) def save(self): \"\"\"Save current media info to a", "= index def _save_as_v1(self, fp): \"\"\"Save copy to specified path.", "search query. \"\"\" logger.info('Searching for %s', q) try: parsed_q =", "the header. writer = csv.writer(of) writer.writerow(cols) for i in range(len(self._relpath2index)):", "import csv else: string_types = (basestring,) import backports.csv as csv", "sequence of columns to write. 
\"\"\" logger.info('Exporting CSV: %s', fname)", "'tags'] p.copy_traits(self, traits, copy='deep') # Clear out the _done information", "relpath): \"\"\"Given the relative path of some media, return a", "type_map = dict(float=FLOAT.from_bytes, int=INT.from_bytes) for term in q.leaves(): if isinstance(term,", "exists(old_save_file): shutil.move(old_save_file, self.save_file) def _extensions_changed(self, ext): if self.root is not", "0: msg += (\"\\nPlease check that your path column matches", "and a getter to get the attribute check if the", "media relative paths.\"\"\" return self._relpath2index.keys() def _get_media_attr(self, index, attr): \"\"\"Given", "from the database. \"\"\" relpath2index = self._relpath2index indices = [(x,", "the data #### def update(self, media_data, tags=None): \"\"\"Create/update the internal", "def load(self, fp=None): \"\"\"Load media info from opened file object.", "return result def _check_date_range(x, term): result = True if term.startdate", "enumerate(media): relpath2index[key] = index tags = m.pop('tags') for tname, v", "Media, MediaData, get_media_data from .directory import Directory from . import", "query.DateRange): if expr.fieldname == 'ctime': value = get_tag(m_key, 'ctime_') elif", "tag.type: removed.append(tag) added.append(tag) for tag in old_tags: if tag.name not", "state['directories']] state.pop('relpath') state.pop('name') return state fp = open_file(fp, 'wb') media", "def save(self): \"\"\"Save current media info to a file object", "self.tags] root = self.root.__getstate__() processors = [processor.dump(x) for x in", "= record[header_index] try: value = type_map[tag.type](data) if media is not", "for testing and making sure we still read the old", "So we only # set self.tags below. self.tags = new_tags", "proc._done.clear() return p # #### CRUD interface to the data", "= get_media_data(f.path, f.relpath) self.update(data) for d in dir.directories: if refresh:", "def _get_media_attr(self, index, attr): \"\"\"Given an index to the media,", "= self.root.__getstate__() processors = [processor.dump(x) for x in self.processors] data", "print(\"Invalid search expression: %s\" % q) return tag_types = self._get_tag_types()", "a file object \"\"\" if len(self.save_file) > 0: self.save_as(self.save_file) self._update_last_save_time()", "tags=tags, media_data=self._data, tag_data=self._tag_data, relpath2index=self._relpath2index, root=root, processors=processors ) json_tricks.dump(data, fp, compression=True)", "query.Phrase): attr = expr.fieldname text = \" \".join(expr.words) return _check_value(get_tag(m_key,", "record[path_idx] rpath = relpath(path, self.path) index = self._relpath2index.get(rpath, None) media", "expr.is_leaf(): if isinstance(expr, query.Term): attr = expr.fieldname return _check_value(get_tag(m_key, attr),", "self.root = Directory(path=path, extensions=self.extensions) def _tags_default(self): return [TagInfo(name='completed', type='bool')] def", "in self.tags] root = self.root.__getstate__() processors = [processor.dump(x) for x", "self.tags = new_tags # Update the cached media for m", "root.__setstate__(data.get('root')) self.extensions = root.extensions self.root = root self.number_of_files = len(self._relpath2index)", "def __tag_data_default(self): tags = {} for key in self.tags: tags[key.name]", "= data self._tag_data = tag_data self._relpath2index = relpath2index def _delete_record(self,", "in enumerate(MediaData._fields): self._data[key][index] = media_data[i] if tags: for key, value", "%s\", q) print(\"Invalid search 
def _cleanup_query(q, tag_types):
    type_map = dict(float=FLOAT.from_bytes, int=INT.from_bytes)
    for term in q.leaves():
        if isinstance(term, query.Term):
            if isinstance(term.text, (str, unicode, bytes)):
                fieldtype = tag_types[term.fieldname]
                if fieldtype in type_map:
                    term.text = type_map[fieldtype](term.text)
                else:
                    term.text = term.text.lower()
        elif isinstance(term, query.Phrase):
            term.words = [x.lower() for x in term.words]
\"\"\" fp = open_file(fp, 'wb') tags = [(t.name,", "does not have a 'path' column.\" return False, msg tags", "= qparser.QueryParser('path', schema=schema) qp.add_plugin(qparser.GtLtPlugin()) from whoosh.qparser.dateparse import DateParserPlugin qp.add_plugin(DateParserPlugin()) return", "def _cleanup_query(q, tag_types): type_map = dict(float=FLOAT.from_bytes, int=INT.from_bytes) for term in", "child in expr.children(): result &= _search_media(child, m_key, get_tag) if not", "x: x, 'int': int, 'float': float } count = 0", "the # removed tags will not exist in _tag_data causing", "= value else: self._tag_data[tag.name][index] = value except ValueError: pass msg", "dict( type=TEXT, file_name=TEXT, path=TEXT, mtime=DATETIME, ctime=DATETIME, size=INT ) type_to_field =", "io.open(fname, 'w', newline='', encoding='utf-8') as of: # Write the header.", "= self._relpath2index.get(rpath, None) media = self._media.get(rpath) if index is not", "query.Term): attr = expr.fieldname return _check_value(get_tag(m_key, attr), expr.text) elif isinstance(expr,", "None: count += 1 for tag, header_index in tags.items(): data", "_tag_data = self._tag_data for key in MediaData._fields: _data[key][index] = _data[key][last]", "= tag_data self._relpath2index = relpath2index def _delete_record(self, index, relpath): for", "__tag_data_default(self): tags = {} for key in self.tags: tags[key.name] =", "save_file = Str last_save_time = Str _data = Dict _tag_data", "for child in expr.children(): result &= _search_media(child, m_key, get_tag) if", "term.end return result def _search_media(expr, m_key, get_tag): \"\"\"Given search expression,", "dict( string=TEXT, text=TEXT, int=INT, float=FLOAT, bool=BOOLEAN ) for tag in", "\"\"\" logger.info('Searching for %s', q) try: parsed_q = self._query_parser.parse(q) except", "len(self.save_file) > 0: self.save_as(self.save_file) self._update_last_save_time() else: raise IOError(\"No valid save", "x: x.lower() in TRUE, 'string': lambda x: x, 'text': lambda", "import datetime_to_long, long_to_datetime from .common import get_project_dir from .media import", "basename(key), key def refresh(self): logger.info('Refreshing project: %s', self.name) self.clean() self.scan(refresh=True)", "\"save the project.\") return True, msg def load(self, fp=None): \"\"\"Load", "newline='', encoding='utf-8') as fp: reader = csv.reader(fp, dialect) header =", "fp: reader = csv.reader(fp, dialect) header = next(reader) return has_header,", "result &= x >= term.start if term.enddate is not None", "size=INT ) type_to_field = dict( string=TEXT, text=TEXT, int=INT, float=FLOAT, bool=BOOLEAN", "_check_value(get_tag(m_key, attr), expr.text) elif isinstance(expr, query.Phrase): attr = expr.fieldname text", "text) elif isinstance(expr, query.DateRange): if expr.fieldname == 'ctime': value =", "> 0: msg += (\"\\nPlease check that your path column", ".media import Media, MediaData, get_media_data from .directory import Directory from", "data = {} index = self._relpath2index[relpath] for key in MediaData._fields:", "a csv file. 
If `cols` are not specified, it writes", "self.root = root self.number_of_files = len(self._relpath2index) def save(self): \"\"\"Save current", "for tag in self.tags: self._tag_data[tag.name].append(tag.default) index = self._relpath2index[relpath] for i,", "self.processors = [processor.load(x) for x in data.get('processors', [])] version =", "= data['media_data'] self._tag_data = data['tag_data'] self._relpath2index = data['relpath2index'] root =", "the tags, extensions and the other settings of the project.", "media, return its value. \"\"\" if attr in self._data: return", "'ctime_' keymap['_mtime'] = 'mtime_' for index, (key, m) in enumerate(media):", "value = type_map[tag.type](data) if media is not None: media.tags[tag.name] =", "in self._tag_data: tags[key] = self._tag_data[key][index] media = Media.from_data(MediaData(**data), tags) media.on_trait_change(self._media_tag_handler,", "expr) else: print(\"Unsupported term: %r\" % expr) return False else:", "the attribute check if the media matches expression. \"\"\" if", "other settings of the project. This will not copy any", "result = dict(COMMON_TAGS) result.update(dict((t.name, t.type) for t in self.tags)) return", "self.root if root is None or realpath(root.path) != realpath(path): self.root", "reader = csv.reader(fp, dialect) next(reader) # Skip header for record", "fields.NUMERIC(numtype=int) FLOAT = fields.NUMERIC(numtype=float) def get_file_saved_time(path): dt = datetime.datetime.fromtimestamp(os.stat(path).st_ctime) return", "isinstance(term.text, (str, unicode, bytes)): fieldtype = tag_types[term.fieldname] if fieldtype in", "self.__tag_data_default is called, so if self.tags is set then the", "added: self._tag_data[tag.name] = [tag.default]*n_entries # The above can be the", "= self._make_query_parser() def copy(self): \"\"\"Make a copy of this project.", "up the db of files that no longer exist. \"\"\"", "= [(x, relpath2index[x]) for x in relpaths] for relpath, index", "[] return tags def _media_tag_handler(self, obj, tname, old, new): index", "= value except ValueError: pass msg = \"Read tags for", "their settings. \"\"\" name = self.name + ' copy' p", "for term in q.leaves(): if isinstance(term, query.Term): if isinstance(term.text, (str,", "_get_csv_headers(fname) if not has_header: return False, \"The CSV file does", "if self.tags is set then the # removed tags will", "if version == 1: self._read_version1_media(data['media']) else: self._data = data['media_data'] self._tag_data", "def clean(self): \"\"\"Scan the project and remove any dead entries.", "%d paths out of %d entries.\" % (count, total) if", "is None: if not exists(self.save_file): return fp = open_file(self.save_file, 'rb')", "added.append(tag) elif tag_info[tag.name] != tag.type: removed.append(tag) added.append(tag) for tag in", "_make_schema(self): from whoosh.fields import BOOLEAN, DATETIME, TEXT, Schema kw =", "longer exist. \"\"\" logger.info('Cleaning project: %s', self.name) root_path = self.path", "add_tags(self, tags): tags = list(self.tags) + tags self.update_tags(tags) def update_tags(self,", "if isinstance(term, query.Term): if isinstance(term.text, (str, unicode, bytes)): fieldtype =", "= ['description', 'extensions', 'path', 'processors', 'tags'] p.copy_traits(self, traits, copy='deep') #", "data[key] = [] return data def __tag_data_default(self): tags = {}", "an error. So we only # set self.tags below. self.tags", "BOOLEAN, DATETIME, TEXT, Schema kw = dict( type=TEXT, file_name=TEXT, path=TEXT,", ": str Input filename. 
\"\"\" logger.info('Importing tags from: %s', fname)", "not exists(self.save_file): return fp = open_file(self.save_file, 'rb') else: fp =", "if new_save_file != old_save_file: self.save_file = new_save_file if exists(old_save_file): shutil.move(old_save_file,", ".common import get_project_dir from .media import Media, MediaData, get_media_data from", "tag_info: added.append(tag) elif tag_info[tag.name] != tag.type: removed.append(tag) added.append(tag) for tag", "not result: break return result elif isinstance(expr, query.Or): result =", "('1', 't', 'true', 'y', 'yes') type_map = { 'bool': lambda", "get_tag): \"\"\"Given search expression, index to media, and a getter", "\"\"\"Return all the keys for the media relative paths.\"\"\" return", "Write the header. writer = csv.writer(of) writer.writerow(cols) for i in", "header.index('path') TRUE = ('1', 't', 'true', 'y', 'yes') type_map =", "def refresh(self): logger.info('Refreshing project: %s', self.name) self.clean() self.scan(refresh=True) # ####", "index, last): _data = self._data _tag_data = self._tag_data for key", "\"\"\" if attr in self._data: return self._data[attr][index] elif attr in", "self.tags new_tag_names = set(tag.name for tag in new_tags) tag_info =", "in enumerate(media): relpath2index[key] = index tags = m.pop('tags') for tname,", "FLOAT = fields.NUMERIC(numtype=float) def get_file_saved_time(path): dt = datetime.datetime.fromtimestamp(os.stat(path).st_ctime) return dt.ctime()", "x <= term.end return result def _search_media(expr, m_key, get_tag): \"\"\"Given", "key in self._tag_data: _tag_data[key][index] = _tag_data[key][last] last_relpath = _data['relpath'][last] self._relpath2index[last_relpath]", "no longer exist. \"\"\" logger.info('Cleaning project: %s', self.name) root_path =", "p.copy_traits(self, traits, copy='deep') # Clear out the _done information from", "return False, msg else: msg += (\"\\nPlease check the imported", "dict.fromkeys(MediaData._fields) for k in keymap: keymap[k] = k keymap['_ctime'] =", "self.update_tags(tags) def update_tags(self, new_tags): old_tags = self.tags new_tag_names = set(tag.name", "file object. \"\"\" if fp is None: if not exists(self.save_file):", "else: return open(fname_or_file, mode) def sanitize_name(name): name = name.lower() name", "self.tags is set then the # removed tags will not", "{ 'bool': lambda x: x.lower() in TRUE, 'string': lambda x:", "self.has_media(f.relpath) or refresh: data = get_media_data(f.path, f.relpath) self.update(data) for d", "fname, cols=None): \"\"\"Export metadata to a csv file. 
If `cols`", "_default_default(self): map = {\"string\": \"\", \"text\": \"\", \"int\": 0, \"float\":", "open_file(self.save_file, 'rb') else: fp = open_file(fp, 'rb') data = json_tricks.load(", "tag.default self._query_parser = self._make_query_parser() def copy(self): \"\"\"Make a copy of", "None and result: result &= x <= term.end return result", "if root is None or realpath(root.path) != realpath(path): self.root =", "m.tags[tag.name] for tag in added: m.tags[tag.name] = tag.default self._query_parser =", "state fp = open_file(fp, 'wb') media = [(key, self.get(key).to_dict()) for", "\"\"\" if relpath in self._media: return self._media[relpath] else: data =", "in state['files']] state['directories'] = [_rewrite_dir(d) for d in state['directories']] state.pop('relpath')", "fname = os.path.join(root_path, rpath) if not os.path.exists(fname): to_remove.append(rpath) self.remove(to_remove) def", "in self.processors] for k, m in media: m['_ctime'] = long_to_datetime(m['_ctime'])", "tags = {} for key in self._tag_data: tags[key] = self._tag_data[key][index]", "join(dirname(fname), base + '_a' + ext) else: return fname COMMON_TAGS", "<= term.end return result def _search_media(expr, m_key, get_tag): \"\"\"Given search", "qp = qparser.QueryParser('path', schema=schema) qp.add_plugin(qparser.GtLtPlugin()) from whoosh.qparser.dateparse import DateParserPlugin qp.add_plugin(DateParserPlugin())", "to the data #### def clean(self): \"\"\"Scan the project and", "backports.csv as csv INT = fields.NUMERIC(numtype=int) FLOAT = fields.NUMERIC(numtype=float) def", "IOError(\"No valid save file set.\") def save_as(self, fp): \"\"\"Save copy", "logger.warn(\"Invalid search expression: %s\", q) print(\"Invalid search expression: %s\" %", "not exist in _tag_data causing an error. So we only", "self._tag_data[tag.name][index] = value except ValueError: pass msg = \"Read tags", "self._delete_record(last, relpath) def has_media(self, relpath): \"\"\"Returns True if the media", "in dir.files: if not self.has_media(f.relpath) or refresh: data = get_media_data(f.path,", "if isinstance(term.text, (str, unicode, bytes)): fieldtype = tag_types[term.fieldname] if fieldtype", "key in self._tag_data: tags[key] = self._tag_data[key][index] media = Media.from_data(MediaData(**data), tags)", "tag_types = self._get_tag_types() _cleanup_query(parsed_q, tag_types) for key, index in self._relpath2index.items():", "result &= x >= term.start if term.end is not None", "Schema(**kw) def _make_query_parser(self): schema = self._make_schema() qp = qparser.QueryParser('path', schema=schema)", "if term.endexcl: result &= x < term.end else: result &=", "if media is not None: media.tags[tag.name] = value else: self._tag_data[tag.name][index]", "if _search_media(parsed_q, index, self._get_media_attr): yield basename(key), key def refresh(self): logger.info('Refreshing", "DateParserPlugin qp.add_plugin(DateParserPlugin()) return qp def __query_parser_default(self): return self._make_query_parser() def __data_default(self):", "return False class Project(HasTraits): name = Str description = Str", "# Clear out the _done information from the processors for", "result &= x < term.end else: result &= x <=", "is useful when you remove or rename files. 
This does", "= self.__tag_data_default() relpath2index = {} keymap = dict.fromkeys(MediaData._fields) for k", "media_data=self._data, tag_data=self._tag_data, relpath2index=self._relpath2index, root=root, processors=processors ) json_tricks.dump(data, fp, compression=True) fp.close()", "self._query_parser = self._make_query_parser() def copy(self): \"\"\"Make a copy of this", "def _get_csv_headers(fname): sample = _get_sample(fname) sniffer = csv.Sniffer() has_header =", "self._relpath2index for rpath in list(relpath2index.keys()): fname = os.path.join(root_path, rpath) if", "for tag in old_tags) removed = [] added = []", "obj.tags[tag] def _read_version1_media(self, media): data = self.__data_default() tag_data = self.__tag_data_default()", "for tag in removed: del self._tag_data[tag.name] n_entries = len(self._relpath2index) for", "= [x[0] for x in state['files']] state['directories'] = [_rewrite_dir(d) for", "tag.name not in new_tag_names: removed.append(tag) for tag in removed: del", "DATETIME, TEXT, Schema kw = dict( type=TEXT, file_name=TEXT, path=TEXT, mtime=DATETIME,", "'mtime_') return _check_date_range(value, expr) elif isinstance(expr, query.NumericRange): attr = expr.fieldname", "project: %s', self.name) def scan(self, refresh=False): \"\"\"Find all the media", "= [(t.name, t.type) for t in self.tags] root = self.root.__getstate__()", "for x in data['ctime_']] self._data = data self._tag_data = tag_data", "return map[self.type] def open_file(fname_or_file, mode='rb'): if hasattr(fname_or_file, 'read'): return fname_or_file", "description = Str path = Str root = Instance(Directory) tags", "----------- fname: str: a path to the csv file to", "= self.root if root is None or realpath(root.path) != realpath(path):", "new_tags) tag_info = dict((tag.name, tag.type) for tag in old_tags) removed", "index to the media, return its value. \"\"\" if attr", "int=INT.from_bytes) for term in q.leaves(): if isinstance(term, query.Term): if isinstance(term.text,", "for x in data['mtime_']] data['ctime_'] = [datetime_to_long(x) for x in", "data['ctime_'] = [datetime_to_long(x) for x in data['ctime_']] self._data = data", "#### def clean(self): \"\"\"Scan the project and remove any dead", "\"\"\"Load media info from opened file object. \"\"\" if fp", "the search query. \"\"\" logger.info('Searching for %s', q) try: parsed_q", "class TagInfo(HasTraits): name = Str type = Enum(\"string\", \"text\", \"int\",", "index to media, and a getter to get the attribute", "False, msg tags = {x: header.index(x.name) for x in self.tags", "[(t.name, t.type) for t in self.tags] root = self.root.__getstate__() processors", "\"\"\"Find all the media recursively inside the root directory. This", "in self.tags: self._tag_data[tag.name].append(tag.default) index = self._relpath2index[relpath] for i, key in", "is not None: media.update(media_data, tags) def get(self, relpath): \"\"\"Given the", "export_csv(self, fname, cols=None): \"\"\"Export metadata to a csv file. If", "= List(TagInfo) _media = Dict(Str, Media) extensions = List(Str) processors", "fp = open_file(fp, 'wb') media = [(key, self.get(key).to_dict()) for key", "opened file object. \"\"\" if fp is None: if not", "database. \"\"\" relpath2index = self._relpath2index indices = [(x, relpath2index[x]) for", "next(reader) return has_header, header, dialect class TagInfo(HasTraits): name = Str", "media = self._media.get(rpath) if index is not None: count +=", "the number of files. 
It simply cleans up the db", "for col in cols: if col in data_cols: elem =", "in self._media: return self._media[relpath] else: data = {} index =", "string_types = (str,) import csv else: string_types = (basestring,) import", "_scan(d) if refresh: self.root.refresh() _scan(self.root) self.number_of_files = len(self._relpath2index) def search(self,", "Any def __repr__(self): return 'TagInfo(%r, %r)' % (self.name, self.type) def", "result: result &= x <= term.end return result def _search_media(expr,", "to get the attribute check if the media matches expression.", "from .common import get_project_dir from .media import Media, MediaData, get_media_data", "mode='rb'): if hasattr(fname_or_file, 'read'): return fname_or_file else: return open(fname_or_file, mode)", "in MediaData._fields: data[key] = self._data[key][index] tags = {} for key", "the useful metadata. Parameters ----------- fname: str: a path to", "+ ' copy' p = Project(name=name) traits = ['description', 'extensions',", "added.append(tag) for tag in old_tags: if tag.name not in new_tag_names:", "total) if count == 0 and total > 0: msg", "key, value in tags.items(): self._tag_data[key][index] = value media = self._media.get(relpath)", "copy' p = Project(name=name) traits = ['description', 'extensions', 'path', 'processors',", "None) media = self._media.get(rpath) if index is not None: count", "1 for tag, header_index in tags.items(): data = record[header_index] try:", "useful metadata. Parameters ----------- fname: str: a path to the", "\"\"\" logger.info('Exporting CSV: %s', fname) all_keys = ((set(MediaData._fields) | set(self._tag_data.keys()))", "last_save_time = Str _data = Dict _tag_data = Dict _relpath2index", "exists(self.save_file): return get_file_saved_time(self.save_file) else: return '' def _name_changed(self, name): if", "return tags def _media_tag_handler(self, obj, tname, old, new): index =", "self._data[key][index] tags = {} for key in self._tag_data: tags[key] =", "for d in dir.directories: if refresh: d.refresh() _scan(d) if refresh:", "= expr.fieldname text = \" \".join(expr.words) return _check_value(get_tag(m_key, attr), text)", "= new_save_file if exists(old_save_file): shutil.move(old_save_file, self.save_file) def _extensions_changed(self, ext): if", "= [] return tags def _media_tag_handler(self, obj, tname, old, new):", "= Str root = Instance(Directory) tags = List(TagInfo) _media =", "realpath(root.path) != realpath(path): self.root = Directory(path=path, extensions=self.extensions) def _tags_default(self): return", "shutil.move(old_save_file, self.save_file) def _extensions_changed(self, ext): if self.root is not None:", "\"\"\"Returns True if the media data is available. \"\"\" return", "and make sure you \" \"save the project.\") return True,", "tag in old_tags: if tag.name not in new_tag_names: removed.append(tag) for", "to specified path. \"\"\" fp = open_file(fp, 'wb') tags =", "CSV file does not appear to have a header.\" if", "if fieldtype in type_map: term.text = type_map[fieldtype](term.text) else: term.text =", "= json_tricks.load( fp, preserve_order=False, ignore_comments=False ) fp.close() self.name = data.get('name',", "tags for column headers with known tags. Unknown tags are", "you remove or rename files. This does not refresh the", "is accessed, when # creating a new project for example.", "in range(len(self._relpath2index)): line = [] for col in cols: if", "data_cols = set([x for x in cols if x in", "relpath) for each file satisfying the search query. 
\"\"\" logger.info('Searching", "path to the csv file to dump. cols: sequence: a", "attr in self._data: return self._data[attr][index] elif attr in self._tag_data: return", "= get_tag(m_key, 'ctime_') elif expr.fieldname == 'mtime': value = get_tag(m_key,", "check the imported tags and make sure you \" \"save", "state.pop('relpath') state.pop('name') return state fp = open_file(fp, 'wb') media =", "# removed tags will not exist in _tag_data causing an", "relpath): \"\"\"Returns True if the media data is available. \"\"\"", "return True, msg def load(self, fp=None): \"\"\"Load media info from", "index in self._relpath2index.items(): if _search_media(parsed_q, index, self._get_media_attr): yield basename(key), key", "count = 0 total = 0 with io.open(fname, 'r', newline='',", "term): result = True if term.start is not None: if", "tags will not exist in _tag_data causing an error. So", "old_dir = dirname(old_save_file) new_save_file = join(old_dir, sanitize_name(name) + '.vxn') if", "the root directory. This will not clobber existing records but", "= self._data _tag_data = self._tag_data for key in MediaData._fields: _data[key][index]", "= expr.fieldname return _check_value(get_tag(m_key, attr), expr.text) elif isinstance(expr, query.Phrase): attr", "the old format.\" state['files'] = [x[0] for x in state['files']]", "is not None: self.root.extensions = ext def _extensions_items_changed(self): if self.root", "tags): tags = list(self.tags) + tags self.update_tags(tags) def update_tags(self, new_tags):", "x in data.get('processors', [])] version = data.get('version') if version ==", "(abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext) import", "information from given CSV filename. Returns the success status and", "relpath) def has_media(self, relpath): \"\"\"Returns True if the media data", "Parameters ----------- fname: str: a path to the csv file", "reverse=True): last = len(relpath2index) - 1 if index == last:", "%s', self.name) self.clean() self.scan(refresh=True) # #### Private protocol ################################################ def", "__query_parser_default(self): return self._make_query_parser() def __data_default(self): data = {} for key", "msg = \"Read tags for %d paths out of %d", "a copy of this project. This does not copy the", "= type_map[fieldtype](term.text) else: term.text = term.text.lower() elif isinstance(term, query.Phrase): term.words", "= Str path = Str root = Instance(Directory) tags =", "in self._tag_data: return self._tag_data[attr][index] # #### End of CRUD interface", "refresh: d.refresh() _scan(d) if refresh: self.root.refresh() _scan(self.root) self.number_of_files = len(self._relpath2index)", "in data.get('processors', [])] version = data.get('version') if version == 1:", "for key in self._relpath2index] tags = [(t.name, t.type) for t", "This is useful when you remove or rename files. This", "= dict( version=2, path=self.path, name=self.name, description=self.description, tags=tags, media_data=self._data, tag_data=self._tag_data, relpath2index=self._relpath2index,", "for x in term.words] def _check_value(value, expr): if isinstance(expr, string_types):", "\"\"\"Given an index to the media, return its value. 
\"\"\"", "= [] added = [] for tag in new_tags: if", "else: self._data = data['media_data'] self._tag_data = data['tag_data'] self._relpath2index = data['relpath2index']", "query.Phrase): term.words = [x.lower() for x in term.words] def _check_value(value,", "for x in data['tags']] self.processors = [processor.load(x) for x in", "base, ext = splitext(basename(fname)) return join(dirname(fname), base + '_a' +", "self._relpath2index[relpath] def _replace_with_last_record(self, index, last): _data = self._data _tag_data =", "the internal data given the media data and tags. Parameters", "result = True if term.start is not None: if term.startexcl:", "= media_data[i] if tags: for key, value in tags.items(): self._tag_data[key][index]", "version = data.get('version') if version == 1: self._read_version1_media(data['media']) else: self._data", "dead entries. This is useful when you remove or rename", "fp, preserve_order=False, ignore_comments=False ) fp.close() self.name = data.get('name', '') self.description", "def search(self, q): \"\"\"A generator which yields the (filename, relpath)", "key in MediaData._fields: _data[key][index] = _data[key][last] for key in self._tag_data:", "new_tags # Update the cached media for m in self._media.values():", "dirname(old_save_file) new_save_file = join(old_dir, sanitize_name(name) + '.vxn') if new_save_file !=", "file object \"\"\" if len(self.save_file) > 0: self.save_as(self.save_file) self._update_last_save_time() else:", "self.save_as(self.save_file) self._update_last_save_time() else: raise IOError(\"No valid save file set.\") def", "self.root is not None: self.root.extensions = self.extensions def _get_tag_types(self): result", "for key in MediaData._fields: self._data[key].append(None) for tag in self.tags: self._tag_data[tag.name].append(tag.default)", "to a csv file. If `cols` are not specified, it", "in data['mtime_']] data['ctime_'] = [datetime_to_long(x) for x in data['ctime_']] self._data", "= index for key in MediaData._fields: self._data[key].append(None) for tag in", "== 1: self._read_version1_media(data['media']) else: self._data = data['media_data'] self._tag_data = data['tag_data']", "if index == last: self._delete_record(last, relpath) else: self._replace_with_last_record(index, last) self._delete_record(last,", "def _update_last_save_time(self): self.last_save_time = get_file_saved_time(self.save_file) def _last_save_time_default(self): if exists(self.save_file): return", "above can be the first time when self._tag_data is accessed,", "in MediaData._fields: del self._data[key][index] for key in self._tag_data: del self._tag_data[key][index]", "tag in added: m.tags[tag.name] = tag.default self._query_parser = self._make_query_parser() def", "\"\"\"Scan the project and remove any dead entries. This is", "_check_value(value, expr): if isinstance(expr, string_types): return expr in value.lower() else:", "qp.add_plugin(DateParserPlugin()) return qp def __query_parser_default(self): return self._make_query_parser() def __data_default(self): data", "the project and remove any dead entries. This is useful", "to the media, return its value. 
\"\"\" if attr in", "dict( file_name='string', path='string', relpath='string', ctime='string', mtime='string', size='int', type='string' ) def", "csv.reader(fp, dialect) next(reader) # Skip header for record in reader:", "search(self, q): \"\"\"A generator which yields the (filename, relpath) for", "self._tag_data[key][index] if relpath in self._media: del self._media[relpath] del self._relpath2index[relpath] def", "number of files. It simply cleans up the db of", "tag.type) for tag in old_tags) removed = [] added =", "= Dict(Str, Media) extensions = List(Str) processors = List(processor.FactoryBase) number_of_files", "header: msg = \"The CSV file does not have a", "not in m: data['file_name'].append(basename(key)) data['mtime_'] = [datetime_to_long(x) for x in", "for %s', q) try: parsed_q = self._query_parser.parse(q) except Exception: logger.warn(\"Invalid", "else: return '' def _name_changed(self, name): if len(name) > 0:", "self._tag_data for key in MediaData._fields: _data[key][index] = _data[key][last] for key", "paths.\") return False, msg else: msg += (\"\\nPlease check the", "i, key in enumerate(MediaData._fields): self._data[key][index] = media_data[i] if tags: for", "logging import os from os.path import (abspath, basename, dirname, exists,", "as csv INT = fields.NUMERIC(numtype=int) FLOAT = fields.NUMERIC(numtype=float) def get_file_saved_time(path):", "not None: self.root.extensions = self.extensions def _get_tag_types(self): result = dict(COMMON_TAGS)", "header. writer = csv.writer(of) writer.writerow(cols) for i in range(len(self._relpath2index)): line", "in old_tags: if tag.name not in new_tag_names: removed.append(tag) for tag", "is not None and result: result &= x <= term.end", "if term.startdate is not None: result &= x >= term.start", "not None and result: if term.endexcl: result &= x <", "return False else: if isinstance(expr, query.And): result = True for", "%s', self.name) root_path = self.path to_remove = [] relpath2index =", "of files that no longer exist. 
\"\"\" logger.info('Cleaning project: %s',", "del self._tag_data[tag.name] n_entries = len(self._relpath2index) for tag in added: self._tag_data[tag.name]", "tags and make sure you \" \"save the project.\") return", "not None: media.tags[tag.name] = value else: self._tag_data[tag.name][index] = value except", "import os from os.path import (abspath, basename, dirname, exists, expanduser,", "result def _make_schema(self): from whoosh.fields import BOOLEAN, DATETIME, TEXT, Schema", "name) return re.sub(r'\\W+', '', name) def get_non_existing_filename(fname): if exists(fname): base,", "set then the # removed tags will not exist in", "len(self._relpath2index) for tag in added: self._tag_data[tag.name] = [tag.default]*n_entries # The", "scan(self, refresh=False): \"\"\"Find all the media recursively inside the root", "re import shutil import sys from traits.api import (Any, Dict,", "set(('ctime_', 'mtime_'))) if cols is None: cols = all_keys cols", "import BOOLEAN, DATETIME, TEXT, Schema kw = dict( type=TEXT, file_name=TEXT,", "def scan(self, refresh=False): \"\"\"Find all the media recursively inside the", "_cleanup_query(q, tag_types): type_map = dict(float=FLOAT.from_bytes, int=INT.from_bytes) for term in q.leaves():", "qparser.QueryParser('path', schema=schema) qp.add_plugin(qparser.GtLtPlugin()) from whoosh.qparser.dateparse import DateParserPlugin qp.add_plugin(DateParserPlugin()) return qp", "TRUE = ('1', 't', 'true', 'y', 'yes') type_map = {", "if refresh: d.refresh() _scan(d) if refresh: self.root.refresh() _scan(self.root) self.number_of_files =", "= Instance(Directory) tags = List(TagInfo) _media = Dict(Str, Media) extensions", "for rpath in list(relpath2index.keys()): fname = os.path.join(root_path, rpath) if not", "(Any, Dict, Enum, HasTraits, Instance, List, Long, Str) from whoosh", "os.path.exists(fname): to_remove.append(rpath) self.remove(to_remove) def export_csv(self, fname, cols=None): \"\"\"Export metadata to", "exist. \"\"\" logger.info('Cleaning project: %s', self.name) root_path = self.path to_remove", "in self._media: del self._media[relpath] del self._relpath2index[relpath] def _replace_with_last_record(self, index, last):", "x in self.processors] data = dict( version=2, path=self.path, name=self.name, description=self.description,", "logger.info('Cleaning project: %s', self.name) root_path = self.path to_remove = []", "has_header, header, dialect class TagInfo(HasTraits): name = Str type =", "fname = sanitize_name(self.name) + '.vxn' d = get_project_dir() return get_non_existing_filename(join(d,", "for k, m in media: m['_ctime'] = long_to_datetime(m['_ctime']) m['_mtime'] =", "return _check_date_range(value, expr) elif isinstance(expr, query.NumericRange): attr = expr.fieldname return", "old_tags: if tag.name not in new_tag_names: removed.append(tag) for tag in", "version == 1: self._read_version1_media(data['media']) else: self._data = data['media_data'] self._tag_data =", "project and remove any dead entries. 

def get_file_saved_time(path):
    dt = datetime.datetime.fromtimestamp(os.stat(path).st_ctime)
    return dt.ctime()


def _get_sample(fname):
    sample = ''
    with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
        sample += fp.readline() + fp.readline()
    return sample


def _get_csv_headers(fname):
    sample = _get_sample(fname)
    sniffer = csv.Sniffer()
    has_header = sniffer.has_header(sample)
    dialect = sniffer.sniff(sample)
    with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
        reader = csv.reader(fp, dialect)
        header = next(reader)

    return has_header, header, dialect


class TagInfo(HasTraits):
    name = Str
    type = Enum("string", "text", "int", "float", "bool")
    default = Any

    def __repr__(self):
        return 'TagInfo(%r, %r)' % (self.name, self.type)

    def _default_default(self):
        map = {"string": "", "text": "", "int": 0, "float": 0.0,
               "bool": False}
        return map[self.type]


def open_file(fname_or_file, mode='rb'):
    if hasattr(fname_or_file, 'read'):
        return fname_or_file
    else:
        return open(fname_or_file, mode)


def sanitize_name(name):
    name = name.lower()
    name = re.sub(r'\s+', '_', name)
    return re.sub(r'\W+', '', name)


def get_non_existing_filename(fname):
    if exists(fname):
        base, ext = splitext(basename(fname))
        return join(dirname(fname), base + '_a' + ext)
    else:
        return fname


COMMON_TAGS = dict(
    file_name='string', path='string', relpath='string', ctime='string',
    mtime='string', size='int', type='string'
)
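
# Illustrative sketch (not part of the original module): a quick check of the
# helpers above.  TagInfo picks a type-appropriate default, and sanitize_name/
# get_non_existing_filename normalize save-file names.  The file name used
# here is an arbitrary example.
def _demo_tag_and_name_helpers():
    rating = TagInfo(name='rating', type='int')
    assert rating.default == 0
    completed = TagInfo(name='completed', type='bool')
    assert completed.default is False
    assert sanitize_name('My Project!') == 'my_project'
    # Returns '/tmp/demo_a.vxn' only if '/tmp/demo.vxn' already exists.
    return get_non_existing_filename('/tmp/demo.vxn')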

def _cleanup_query(q, tag_types):
    type_map = dict(float=FLOAT.from_bytes, int=INT.from_bytes)
    for term in q.leaves():
        if isinstance(term, query.Term):
            if isinstance(term.text, (str, unicode, bytes)):
                fieldtype = tag_types[term.fieldname]
                if fieldtype in type_map:
                    term.text = type_map[fieldtype](term.text)
                else:
                    term.text = term.text.lower()
        elif isinstance(term, query.Phrase):
            term.words = [x.lower() for x in term.words]


def _check_value(value, expr):
    if isinstance(expr, string_types):
        return expr in value.lower()
    else:
        return expr == value


def _check_range(x, term):
    result = True
    if term.start is not None:
        if term.startexcl:
            result &= x > term.start
        else:
            result &= x >= term.start
    if term.end is not None and result:
        if term.endexcl:
            result &= x < term.end
        else:
            result &= x <= term.end
    return result


def _check_date_range(x, term):
    result = True
    if term.startdate is not None:
        result &= x >= term.start
    if term.enddate is not None and result:
        result &= x <= term.end
    return result


def _search_media(expr, m_key, get_tag):
    """Given search expression, index to media, and a getter to get the
    attribute check if the media matches expression.
    """
    if expr.is_leaf():
        if isinstance(expr, query.Term):
            attr = expr.fieldname
            return _check_value(get_tag(m_key, attr), expr.text)
        elif isinstance(expr, query.Phrase):
            attr = expr.fieldname
            text = " ".join(expr.words)
            return _check_value(get_tag(m_key, attr), text)
        elif isinstance(expr, query.DateRange):
            if expr.fieldname == 'ctime':
                value = get_tag(m_key, 'ctime_')
            elif expr.fieldname == 'mtime':
                value = get_tag(m_key, 'mtime_')
            return _check_date_range(value, expr)
        elif isinstance(expr, query.NumericRange):
            attr = expr.fieldname
            return _check_range(get_tag(m_key, attr), expr)
        else:
            print("Unsupported term: %r" % expr)
            return False
    else:
        if isinstance(expr, query.And):
            result = True
            for child in expr.children():
                result &= _search_media(child, m_key, get_tag)
                if not result:
                    break
            return result
        elif isinstance(expr, query.Or):
            result = False
            for child in expr.children():
                result |= _search_media(child, m_key, get_tag)
                if result:
                    break
            return result
        elif isinstance(expr, query.Not):
            subquery = list(expr.children())[0]
            return not _search_media(subquery, m_key, get_tag)
        else:
            print("Unsupported term: %r" % expr)
            return False
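
# Illustrative sketch (not part of the original module): how the search
# helpers fit together.  A whoosh QueryParser (built the same way as
# Project._make_query_parser below) parses the query string, _cleanup_query
# lowercases the terms, and _search_media evaluates the parsed query against
# one record via a getter.  The schema and record here are invented for the
# example.
def _demo_search_helpers():
    from whoosh.fields import TEXT, Schema
    schema = Schema(path=TEXT, type=TEXT)
    qp = qparser.QueryParser('path', schema=schema)
    parsed = qp.parse(u'Beach type:image')
    _cleanup_query(parsed, dict(path='string', type='string'))

    record = {'path': 'photos/Beach/img_001.jpg', 'type': 'image'}

    def get_tag(_index, attr):
        return record[attr]

    # True for this record: 'beach' is a substring of the path and the
    # type term matches.
    return _search_media(parsed, 0, get_tag)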

class Project(HasTraits):
    name = Str
    description = Str
    path = Str
    root = Instance(Directory)
    tags = List(TagInfo)

    _media = Dict(Str, Media)

    extensions = List(Str)

    processors = List(processor.FactoryBase)

    number_of_files = Long

    # Path where the project data is saved.
    save_file = Str
    last_save_time = Str

    _data = Dict
    _tag_data = Dict
    _relpath2index = Dict()
    _query_parser = Instance(qparser.QueryParser)

    def add_tags(self, tags):
        tags = list(self.tags) + tags
        self.update_tags(tags)

    def update_tags(self, new_tags):
        old_tags = self.tags
        new_tag_names = set(tag.name for tag in new_tags)
        tag_info = dict((tag.name, tag.type) for tag in old_tags)
        removed = []
        added = []
        for tag in new_tags:
            if tag.name not in tag_info:
                added.append(tag)
            elif tag_info[tag.name] != tag.type:
                removed.append(tag)
                added.append(tag)
        for tag in old_tags:
            if tag.name not in new_tag_names:
                removed.append(tag)

        for tag in removed:
            del self._tag_data[tag.name]

        n_entries = len(self._relpath2index)
        for tag in added:
            self._tag_data[tag.name] = [tag.default]*n_entries

        # The above can be the first time when self._tag_data is accessed,
        # when creating a new project for example. In this case,
        # self.__tag_data_default is called, so if self.tags is set then the
        # removed tags will not exist in _tag_data causing an error. So we
        # only set self.tags below.
        self.tags = new_tags

        # Update the cached media
        for m in self._media.values():
            for tag in removed:
                del m.tags[tag.name]
            for tag in added:
                m.tags[tag.name] = tag.default

        self._query_parser = self._make_query_parser()

    def copy(self):
        """Make a copy of this project. This does not copy the data but only
        the tags, extensions and the other settings of the project.

        This will not copy any of the processor states but only their
        settings.
        """
        name = self.name + ' copy'
        p = Project(name=name)
        traits = ['description', 'extensions', 'path', 'processors', 'tags']
        p.copy_traits(self, traits, copy='deep')
        # Clear out the _done information from the processors
        for proc in p.processors:
            proc._done.clear()
        return p

    # #### CRUD interface to the data ####

    def update(self, media_data, tags=None):
        """Create/update the internal data given the media data and tags.

        Parameters
        ----------

        f: vixen.directory.File instance
        tags: dict
        """
        relpath = media_data.relpath
        if not self.has_media(relpath):
            index = len(self._relpath2index)
            self._relpath2index[relpath] = index
            for key in MediaData._fields:
                self._data[key].append(None)
            for tag in self.tags:
                self._tag_data[tag.name].append(tag.default)

        index = self._relpath2index[relpath]
        for i, key in enumerate(MediaData._fields):
            self._data[key][index] = media_data[i]
        if tags:
            for key, value in tags.items():
                self._tag_data[key][index] = value
        media = self._media.get(relpath)
        if media is not None:
            media.update(media_data, tags)

    def get(self, relpath):
        """Given the relative path of some media, return a Media instance.
        """
        if relpath in self._media:
            return self._media[relpath]
        else:
            data = {}
            index = self._relpath2index[relpath]
            for key in MediaData._fields:
                data[key] = self._data[key][index]
            tags = {}
            for key in self._tag_data:
                tags[key] = self._tag_data[key][index]
            media = Media.from_data(MediaData(**data), tags)
            media.on_trait_change(self._media_tag_handler, 'tags_items')
            self._media[relpath] = media
            return media

    def remove(self, relpaths):
        """Given a list of relative path of some media, remove them from the
        database.
        """
        relpath2index = self._relpath2index
        indices = [(x, relpath2index[x]) for x in relpaths]
        for relpath, index in sorted(indices, reverse=True):
            last = len(relpath2index) - 1
            if index == last:
                self._delete_record(last, relpath)
            else:
                self._replace_with_last_record(index, last)
                self._delete_record(last, relpath)

    def has_media(self, relpath):
        """Returns True if the media data is available.
        """
        return relpath in self._relpath2index

    def keys(self):
        """Return all the keys for the media relative paths."""
        return self._relpath2index.keys()

    def _get_media_attr(self, index, attr):
        """Given an index to the media, return its value.
        """
        if attr in self._data:
            return self._data[attr][index]
        elif attr in self._tag_data:
            return self._tag_data[attr][index]

    # #### End of CRUD interface to the data ####

    def clean(self):
        """Scan the project and remove any dead entries.

        This is useful when you remove or rename files. This does not
        refresh the directory tree or set the number of files. It simply
        cleans up the db of files that no longer exist.
        """
        logger.info('Cleaning project: %s', self.name)
        root_path = self.path
        to_remove = []
        relpath2index = self._relpath2index
        for rpath in list(relpath2index.keys()):
            fname = os.path.join(root_path, rpath)
            if not os.path.exists(fname):
                to_remove.append(rpath)
        self.remove(to_remove)

    def export_csv(self, fname, cols=None):
        """Export metadata to a csv file. If `cols` are not specified, it
        writes out all the useful metadata.

        Parameters
        -----------

        fname: str: a path to the csv file to dump.
        cols: sequence: a sequence of columns to write.
        """
        logger.info('Exporting CSV: %s', fname)
        all_keys = ((set(MediaData._fields) | set(self._tag_data.keys())) -
                    set(('ctime_', 'mtime_')))
        if cols is None:
            cols = all_keys
        cols = list(sorted(cols))
        data_cols = set([x for x in cols if x in self._data])
        with io.open(fname, 'w', newline='', encoding='utf-8') as of:
            # Write the header.
            writer = csv.writer(of)
            writer.writerow(cols)
            for i in range(len(self._relpath2index)):
                line = []
                for col in cols:
                    if col in data_cols:
                        elem = self._data[col][i]
                    else:
                        elem = self._tag_data[col][i]
                    line.append(elem)
                writer.writerow(line)

    def import_csv(self, fname):
        """Read tag information from given CSV filename. Returns the success
        status and the error message if any. Note that this only applies
        tags for column headers with known tags. Unknown tags are not added.

        Parameters
        ----------

        fname : str   Input filename.
        """
        logger.info('Importing tags from: %s', fname)
        has_header, header, dialect = _get_csv_headers(fname)
        if not has_header:
            return False, "The CSV file does not appear to have a header."
        if 'path' not in header:
            msg = "The CSV file does not have a 'path' column."
            return False, msg

        tags = {x: header.index(x.name) for x in self.tags
                if x.name in header}
        path_idx = header.index('path')
        TRUE = ('1', 't', 'true', 'y', 'yes')
        type_map = {
            'bool': lambda x: x.lower() in TRUE,
            'string': lambda x: x,
            'text': lambda x: x,
            'int': int,
            'float': float
        }

        count = 0
        total = 0
        with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
            reader = csv.reader(fp, dialect)
            next(reader)  # Skip header
            for record in reader:
                total += 1
                path = record[path_idx]
                rpath = relpath(path, self.path)
                index = self._relpath2index.get(rpath, None)
                media = self._media.get(rpath)
                if index is not None:
                    count += 1
                    for tag, header_index in tags.items():
                        data = record[header_index]
                        try:
                            value = type_map[tag.type](data)
                            if media is not None:
                                media.tags[tag.name] = value
                            else:
                                self._tag_data[tag.name][index] = value
                        except ValueError:
                            pass

        msg = "Read tags for %d paths out of %d entries." % (count, total)
        if count == 0 and total > 0:
            msg += ("\nPlease check that your path column matches "
                    "the media paths.")
            return False, msg
        else:
            msg += ("\nPlease check the imported tags and make sure you "
                    "save the project.")
            return True, msg

    def load(self, fp=None):
        """Load media info from opened file object.
        """
        if fp is None:
            if not exists(self.save_file):
                return
            fp = open_file(self.save_file, 'rb')
        else:
            fp = open_file(fp, 'rb')

        data = json_tricks.load(
            fp, preserve_order=False, ignore_comments=False
        )
        fp.close()
        self.name = data.get('name', '')
        self.description = data.get('description', '')
        self.path = data.get('path')
        self.tags = [TagInfo(name=x[0], type=x[1]) for x in data['tags']]
        self.processors = [processor.load(x)
                           for x in data.get('processors', [])]
        version = data.get('version')
        if version == 1:
            self._read_version1_media(data['media'])
        else:
            self._data = data['media_data']
            self._tag_data = data['tag_data']
            self._relpath2index = data['relpath2index']
        root = Directory()
        root.__setstate__(data.get('root'))
        self.extensions = root.extensions
        self.root = root
        self.number_of_files = len(self._relpath2index)

    def save(self):
        """Save current media info to a file object.
        """
        if len(self.save_file) > 0:
            self.save_as(self.save_file)
            self._update_last_save_time()
        else:
            raise IOError("No valid save file set.")

    def save_as(self, fp):
        """Save copy to specified path.
        """
        fp = open_file(fp, 'wb')
        tags = [(t.name, t.type) for t in self.tags]
        root = self.root.__getstate__()
        processors = [processor.dump(x) for x in self.processors]
        data = dict(
            version=2, path=self.path, name=self.name,
            description=self.description, tags=tags, media_data=self._data,
            tag_data=self._tag_data, relpath2index=self._relpath2index,
            root=root, processors=processors
        )
        json_tricks.dump(data, fp, compression=True)
        fp.close()
        logger.info('Saved project: %s', self.name)
def _cleanup_query(q, tag_types):
    type_map = dict(float=FLOAT.from_bytes, int=INT.from_bytes)
    for term in q.leaves():
        if isinstance(term, query.Term):
            if isinstance(term.text, (str, unicode, bytes)):
                fieldtype = tag_types[term.fieldname]
                if fieldtype in type_map:
                    term.text = type_map[fieldtype](term.text)
                else:
                    term.text = term.text.lower()
        elif isinstance(term, query.Phrase):
            term.words = [x.lower() for x in term.words]


def _check_value(value, expr):
    if isinstance(expr, string_types):
        return expr in value.lower()
    else:
        return expr == value


def _check_range(x, term):
    result = True
    if term.start is not None:
        if term.startexcl:
            result &= x > term.start
        else:
            result &= x >= term.start
    if term.end is not None and result:
        if term.endexcl:
            result &= x < term.end
        else:
            result &= x <= term.end
    return result


def _check_date_range(x, term):
    result = True
    if term.startdate is not None:
        result &= x >= term.start
    if term.enddate is not None and result:
        result &= x <= term.end
    return result


def _search_media(expr, m_key, get_tag):
    """Given search expression, index to media, and a getter to get the
    attribute, check if the media matches expression.
    """
    if expr.is_leaf():
        if isinstance(expr, query.Term):
            attr = expr.fieldname
            return _check_value(get_tag(m_key, attr), expr.text)
        elif isinstance(expr, query.Phrase):
            attr = expr.fieldname
            text = " ".join(expr.words)
            return _check_value(get_tag(m_key, attr), text)
        elif isinstance(expr, query.DateRange):
            if expr.fieldname == 'ctime':
                value = get_tag(m_key, 'ctime_')
            elif expr.fieldname == 'mtime':
                value = get_tag(m_key, 'mtime_')
            return _check_date_range(value, expr)
        elif isinstance(expr, query.NumericRange):
            attr = expr.fieldname
            return _check_range(get_tag(m_key, attr), expr)
        else:
            print("Unsupported term: %r" % expr)
            return False
    else:
        if isinstance(expr, query.And):
            result = True
            for child in expr.children():
                result &= _search_media(child, m_key, get_tag)
                if not result:
                    break
            return result
        elif isinstance(expr, query.Or):
            result = False
            for child in expr.children():
                result |= _search_media(child, m_key, get_tag)
                if result:
                    break
            return result
        elif isinstance(expr, query.Not):
            subquery = list(expr.children())[0]
            return not _search_media(subquery, m_key, get_tag)
        else:
            print("Unsupported term: %r" % expr)
            return False
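# Illustrative sketch (not part of the original module): _search_media
# evaluates a parsed whoosh query tree directly against a single in-memory
# record. The schema, query string and record below are hypothetical:
#
#   from whoosh import fields, qparser
#   schema = fields.Schema(path=fields.TEXT,
#                          rating=fields.NUMERIC(numtype=int))
#   qp = qparser.QueryParser('path', schema=schema)
#   qp.add_plugin(qparser.GtLtPlugin())
#   parsed = qp.parse('sunset rating:>3')
#   record = {'path': 'photos/sunset.png', 'rating': 5}
#   _search_media(parsed, 0, lambda key, attr: record[attr])  # -> True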
class Project(HasTraits):
    name = Str
    description = Str
    path = Str
    root = Instance(Directory)
    tags = List(TagInfo)

    _media = Dict(Str, Media)

    extensions = List(Str)

    processors = List(processor.FactoryBase)

    number_of_files = Long

    # Path where the project data is saved.
    save_file = Str

    last_save_time = Str

    _data = Dict

    _tag_data = Dict

    _relpath2index = Dict()

    _query_parser = Instance(qparser.QueryParser)

    def add_tags(self, tags):
        tags = list(self.tags) + tags
        self.update_tags(tags)

    def update_tags(self, new_tags):
        old_tags = self.tags
        new_tag_names = set(tag.name for tag in new_tags)
        tag_info = dict((tag.name, tag.type) for tag in old_tags)
        removed = []
        added = []
        for tag in new_tags:
            if tag.name not in tag_info:
                added.append(tag)
            elif tag_info[tag.name] != tag.type:
                removed.append(tag)
                added.append(tag)
        for tag in old_tags:
            if tag.name not in new_tag_names:
                removed.append(tag)

        for tag in removed:
            del self._tag_data[tag.name]

        n_entries = len(self._relpath2index)
        for tag in added:
            self._tag_data[tag.name] = [tag.default]*n_entries

        # The above can be the first time when self._tag_data is accessed,
        # when creating a new project for example. In this case,
        # self.__tag_data_default is called, so if self.tags is set then
        # this will not be correct and will raise an error. So we only
        # set self.tags below.
        self.tags = new_tags

        # Update the cached media
        for m in self._media.values():
            for tag in removed:
                del m.tags[tag.name]
            for tag in added:
                m.tags[tag.name] = tag.default

        self._query_parser = self._make_query_parser()

    def copy(self):
        """Make a copy of this project. This does not copy the data but
        only the tags, extensions and the other settings of the project.

        This will not copy any of the processor states but only their
        settings.
        """
        name = self.name + ' copy'
        p = Project(name=name)
        traits = ['description', 'extensions', 'path', 'processors', 'tags']
        p.copy_traits(self, traits, copy='deep')
        # Clear out the _done information from the processors
        for proc in p.processors:
            proc._done.clear()
        return p

    # #### CRUD interface to the data ####

    def update(self, media_data, tags=None):
        """Create/update the internal data given the media data and tags.

        Parameters
        ----------
        media_data : vixen.media.MediaData instance
        tags : dict
        """
        relpath = media_data.relpath
        if not self.has_media(relpath):
            index = len(self._relpath2index)
            self._relpath2index[relpath] = index
            for key in MediaData._fields:
                self._data[key].append(None)
            for tag in self.tags:
                self._tag_data[tag.name].append(tag.default)

        index = self._relpath2index[relpath]
        for i, key in enumerate(MediaData._fields):
            self._data[key][index] = media_data[i]
        if tags:
            for key, value in tags.items():
                self._tag_data[key][index] = value
        media = self._media.get(relpath)
        if media is not None:
            media.update(media_data, tags)

    def get(self, relpath):
        """Given the relative path of some media, return a Media instance.
        """
        if relpath in self._media:
            return self._media[relpath]
        else:
            data = {}
            index = self._relpath2index[relpath]
            for key in MediaData._fields:
                data[key] = self._data[key][index]
            tags = {}
            for key in self._tag_data:
                tags[key] = self._tag_data[key][index]
            media = Media.from_data(MediaData(**data), tags)
            media.on_trait_change(self._media_tag_handler, 'tags_items')
            self._media[relpath] = media
            return media

    def remove(self, relpaths):
        """Given a list of relative path of some media, remove them from
        the database.
        """
        relpath2index = self._relpath2index
        indices = [(x, relpath2index[x]) for x in relpaths]
        for relpath, index in sorted(indices, reverse=True):
            last = len(relpath2index) - 1
            if index == last:
                self._delete_record(last, relpath)
            else:
                self._replace_with_last_record(index, last)
                self._delete_record(last, relpath)
\"\"\"", ") json_tricks.dump(data, fp, compression=True) fp.close() logger.info('Saved project: %s', self.name) def", "list(expr.children())[0] return not _search_media(subquery, m_key, get_tag) else: print(\"Unsupported term: %r\"", "< term.end else: result &= x <= term.end return result", "last = len(relpath2index) - 1 if index == last: self._delete_record(last,", "the relative path of some media, return a Media instance.", "_search_media(child, m_key, get_tag) if not result: break return result elif", "= index tags = m.pop('tags') for tname, v in tags.items():", "def add_tags(self, tags): tags = list(self.tags) + tags self.update_tags(tags) def", "!= realpath(path): self.root = Directory(path=path, extensions=self.extensions) def _tags_default(self): return [TagInfo(name='completed',", "relative paths.\"\"\" return self._relpath2index.keys() def _get_media_attr(self, index, attr): \"\"\"Given an", "\"The CSV file does not have a 'path' column.\" return", "dir.directories: if refresh: d.refresh() _scan(d) if refresh: self.root.refresh() _scan(self.root) self.number_of_files", "extensions=self.extensions) def _tags_default(self): return [TagInfo(name='completed', type='bool')] def _save_file_default(self): if len(self.name)", "\"int\": 0, \"float\": 0.0, \"bool\": False} return map[self.type] def open_file(fname_or_file,", "\"\"\"A generator which yields the (filename, relpath) for each file", "in type_map: term.text = type_map[fieldtype](term.text) else: term.text = term.text.lower() elif", "try: parsed_q = self._query_parser.parse(q) except Exception: logger.warn(\"Invalid search expression: %s\",", "\"\"\" relpath2index = self._relpath2index indices = [(x, relpath2index[x]) for x", "def remove(self, relpaths): \"\"\"Given a list of relative path of", "% (count, total) if count == 0 and total >", "fp=None): \"\"\"Load media info from opened file object. \"\"\" if", "fp): \"\"\"Save copy to specified path. \"\"\" fp = open_file(fp,", "in m: data['file_name'].append(basename(key)) data['mtime_'] = [datetime_to_long(x) for x in data['mtime_']]", "directory tree or set the number of files. It simply", "get(self, relpath): \"\"\"Given the relative path of some media, return", "= Any def __repr__(self): return 'TagInfo(%r, %r)' % (self.name, self.type)", "from os.path import (abspath, basename, dirname, exists, expanduser, join, realpath,", "writer.writerow(line) def import_csv(self, fname): \"\"\"Read tag information from given CSV", "only # set self.tags below. self.tags = new_tags # Update", "expr.fieldname return _check_range(get_tag(m_key, attr), expr) else: print(\"Unsupported term: %r\" %", "if tags: for key, value in tags.items(): self._tag_data[key][index] = value", "state['files'] = [x[0] for x in state['files']] state['directories'] = [_rewrite_dir(d)", "refresh: self.root.refresh() _scan(self.root) self.number_of_files = len(self._relpath2index) def search(self, q): \"\"\"A", "metadata. Parameters ----------- fname: str: a path to the csv", "new_save_file != old_save_file: self.save_file = new_save_file if exists(old_save_file): shutil.move(old_save_file, self.save_file)", "in MediaData._fields: self._data[key].append(None) for tag in self.tags: self._tag_data[tag.name].append(tag.default) index =", "# The above can be the first time when self._tag_data", "set self.tags below. 
self.tags = new_tags # Update the cached", "\"\", \"int\": 0, \"float\": 0.0, \"bool\": False} return map[self.type] def", "self._replace_with_last_record(index, last) self._delete_record(last, relpath) def has_media(self, relpath): \"\"\"Returns True if", "= data['tag_data'] self._relpath2index = data['relpath2index'] root = Directory() root.__setstate__(data.get('root')) self.extensions", "= csv.reader(fp, dialect) next(reader) # Skip header for record in", "is not None and result: if term.endexcl: result &= x", "{} for key in MediaData._fields: data[key] = [] return data", "newline='', encoding='utf-8') as of: # Write the header. writer =", "key in self._relpath2index] tags = [(t.name, t.type) for t in", "import get_project_dir from .media import Media, MediaData, get_media_data from .directory", "in value.lower() else: return expr == value def _check_range(x, term):", "_search_media(parsed_q, index, self._get_media_attr): yield basename(key), key def refresh(self): logger.info('Refreshing project:", "the first time when self._tag_data is accessed, when # creating", "writer.writerow(cols) for i in range(len(self._relpath2index)): line = [] for col", "[x.lower() for x in term.words] def _check_value(value, expr): if isinstance(expr,", "_get_tag_types(self): result = dict(COMMON_TAGS) result.update(dict((t.name, t.type) for t in self.tags))", "0: old_save_file = self.save_file old_dir = dirname(old_save_file) new_save_file = join(old_dir,", "msg += (\"\\nPlease check the imported tags and make sure", "dump. cols: sequence: a sequence of columns to write. \"\"\"", "have a header.\" if 'path' not in header: msg =", "is not None: if term.startexcl: result &= x > term.start", "} count = 0 total = 0 with io.open(fname, 'r',", "and the other settings of the project. This will not", "state['files']] state['directories'] = [_rewrite_dir(d) for d in state['directories']] state.pop('relpath') state.pop('name')", "in added: m.tags[tag.name] = tag.default self._query_parser = self._make_query_parser() def copy(self):", "for f in dir.files: if not self.has_media(f.relpath) or refresh: data", "_data[key][index] = _data[key][last] for key in self._tag_data: _tag_data[key][index] = _tag_data[key][last]", "its value. \"\"\" if attr in self._data: return self._data[attr][index] elif", "elif isinstance(expr, query.NumericRange): attr = expr.fieldname return _check_range(get_tag(m_key, attr), expr)", "= self._tag_data[col][i] line.append(elem) writer.writerow(line) def import_csv(self, fname): \"\"\"Read tag information", "are not specified, it writes out all the useful metadata.", "= relpath(path, self.path) index = self._relpath2index.get(rpath, None) media = self._media.get(rpath)", "a header.\" if 'path' not in header: msg = \"The", "sequence: a sequence of columns to write. \"\"\" logger.info('Exporting CSV:", "tag in removed: del m.tags[tag.name] for tag in added: m.tags[tag.name]", "self.remove(to_remove) def export_csv(self, fname, cols=None): \"\"\"Export metadata to a csv", "if x in self._data]) with io.open(fname, 'w', newline='', encoding='utf-8') as", "> 2: unicode = str string_types = (str,) import csv", "\" \"the media paths.\") return False, msg else: msg +=", "def _name_changed(self, name): if len(name) > 0: old_save_file = self.save_file", "media info from opened file object. 
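    # Illustrative sketch (not part of the original module): a CSV round
    # trip. The paths are hypothetical and the project must have scanned
    # media for the export to contain rows:
    #
    #   p = Project(path='/tmp/media',
    #               tags=[TagInfo(name='completed', type='bool')])
    #   p.scan()
    #   p.export_csv('/tmp/media_tags.csv', cols=['path', 'completed'])
    #   # ... edit the 'completed' column in a spreadsheet ...
    #   ok, msg = p.import_csv('/tmp/media_tags.csv')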
\"\"\" if fp is", "else: result &= x >= term.start if term.end is not", "= ext def _extensions_items_changed(self): if self.root is not None: self.root.extensions", "in self._data: return self._data[attr][index] elif attr in self._tag_data: return self._tag_data[attr][index]", "writer = csv.writer(of) writer.writerow(cols) for i in range(len(self._relpath2index)): line =", "'rb') else: fp = open_file(fp, 'rb') data = json_tricks.load( fp,", ". import processor logger = logging.getLogger(__name__) if sys.version_info[0] > 2:", "'rb') data = json_tricks.load( fp, preserve_order=False, ignore_comments=False ) fp.close() self.name", "information from the processors for proc in p.processors: proc._done.clear() return", "return relpath in self._relpath2index def keys(self): \"\"\"Return all the keys", "if isinstance(expr, query.Term): attr = expr.fieldname return _check_value(get_tag(m_key, attr), expr.text)", "self._data[key][index] for key in self._tag_data: del self._tag_data[key][index] if relpath in", "The above can be the first time when self._tag_data is", "relpath in self._media: del self._media[relpath] del self._relpath2index[relpath] def _replace_with_last_record(self, index,", "given the media data and tags. Parameters ---------- f: vixen.directory.File", "self._relpath2index indices = [(x, relpath2index[x]) for x in relpaths] for", "whoosh import fields, qparser, query from whoosh.util.times import datetime_to_long, long_to_datetime", "= self._relpath2index[relpath] for key in MediaData._fields: data[key] = self._data[key][index] tags", "tag_data[tname].append(v) for k, v in m.items(): data[keymap[k]].append(v) if 'file_name' not", "the media recursively inside the root directory. This will not", "testing and making sure we still read the old saved", "index in sorted(indices, reverse=True): last = len(relpath2index) - 1 if", "dt.ctime() def _get_sample(fname): sample = '' with io.open(fname, 'r', newline='',", "return fname COMMON_TAGS = dict( file_name='string', path='string', relpath='string', ctime='string', mtime='string',", "is not None: result &= x >= term.start if term.enddate", "matches \" \"the media paths.\") return False, msg else: msg", "################################################ def _setup_root(self): path = abspath(expanduser(self.path)) root = self.root if", "= self._relpath2index[obj.relpath] for tag in new.changed: self._tag_data[tag][index] = obj.tags[tag] def", "get_tag(m_key, 'ctime_') elif expr.fieldname == 'mtime': value = get_tag(m_key, 'mtime_')", "= get_project_dir() return get_non_existing_filename(join(d, fname)) else: return '' def _update_last_save_time(self):", "save_as(self, fp): \"\"\"Save copy to specified path. 
\"\"\" fp =", "else: return fname COMMON_TAGS = dict( file_name='string', path='string', relpath='string', ctime='string',", "name = self.name + ' copy' p = Project(name=name) traits", "in removed: del m.tags[tag.name] for tag in added: m.tags[tag.name] =", "= self._tag_data[key][index] media = Media.from_data(MediaData(**data), tags) media.on_trait_change(self._media_tag_handler, 'tags_items') self._media[relpath] =", "_extensions_changed(self, ext): if self.root is not None: self.root.extensions = ext", "dict((tag.name, tag.type) for tag in old_tags) removed = [] added", "d in state['directories']] state.pop('relpath') state.pop('name') return state fp = open_file(fp,", "first time when self._tag_data is accessed, when # creating a", "in cols: if col in data_cols: elem = self._data[col][i] else:", "name.lower() name = re.sub(r'\\s+', '_', name) return re.sub(r'\\W+', '', name)", "self.name) self.clean() self.scan(refresh=True) # #### Private protocol ################################################ def _setup_root(self):", "self.path to_remove = [] relpath2index = self._relpath2index for rpath in", "self._relpath2index = relpath2index def _delete_record(self, index, relpath): for key in", "check if the media matches expression. \"\"\" if expr.is_leaf(): if", "[tag.default]*n_entries # The above can be the first time when", "from whoosh.util.times import datetime_to_long, long_to_datetime from .common import get_project_dir from", "Long, Str) from whoosh import fields, qparser, query from whoosh.util.times", "_data = Dict _tag_data = Dict _relpath2index = Dict() _query_parser", "self.scan(refresh=True) # #### Private protocol ################################################ def _setup_root(self): path =", "a new project for example. In this case, # self.__tag_data_default", "'path' column.\" return False, msg tags = {x: header.index(x.name) for", "cols=None): \"\"\"Export metadata to a csv file. If `cols` are", "else: self._tag_data[tag.name][index] = value except ValueError: pass msg = \"Read", "= splitext(basename(fname)) return join(dirname(fname), base + '_a' + ext) else:", "for tname, v in tags.items(): tag_data[tname].append(v) for k, v in", "_tag_data[key][index] = _tag_data[key][last] last_relpath = _data['relpath'][last] self._relpath2index[last_relpath] = index def", "m['_ctime'] = long_to_datetime(m['_ctime']) m['_mtime'] = long_to_datetime(m['_mtime']) data = dict( version=1,", "= Dict() _query_parser = Instance(qparser.QueryParser) def add_tags(self, tags): tags =", "data is saved. save_file = Str last_save_time = Str _data", "\"\"\"Export metadata to a csv file. 
If `cols` are not", "path = abspath(expanduser(self.path)) root = self.root if root is None", "in new_tags: if tag.name not in tag_info: added.append(tag) elif tag_info[tag.name]", "Update the cached media for m in self._media.values(): for tag", "self.tags: self._tag_data[tag.name].append(tag.default) index = self._relpath2index[relpath] for i, key in enumerate(MediaData._fields):", "old_save_file: self.save_file = new_save_file if exists(old_save_file): shutil.move(old_save_file, self.save_file) def _extensions_changed(self,", "except ValueError: pass msg = \"Read tags for %d paths", "path = record[path_idx] rpath = relpath(path, self.path) index = self._relpath2index.get(rpath,", "term.words = [x.lower() for x in term.words] def _check_value(value, expr):", "type_map[tag.type](data) if media is not None: media.tags[tag.name] = value else:", "self._tag_data is accessed, when # creating a new project for", "def _scan(dir): for f in dir.files: if not self.has_media(f.relpath) or", "return get_non_existing_filename(join(d, fname)) else: return '' def _update_last_save_time(self): self.last_save_time =", "__data_default(self): data = {} for key in MediaData._fields: data[key] =", "from . import processor logger = logging.getLogger(__name__) if sys.version_info[0] >", "data['mtime_']] data['ctime_'] = [datetime_to_long(x) for x in data['ctime_']] self._data =", "the other settings of the project. This will not copy", "for tag in new_tags) tag_info = dict((tag.name, tag.type) for tag", "in MediaData._fields: _data[key][index] = _data[key][last] for key in self._tag_data: _tag_data[key][index]", "self._relpath2index = data['relpath2index'] root = Directory() root.__setstate__(data.get('root')) self.extensions = root.extensions", "file to dump. cols: sequence: a sequence of columns to", "self.save_file old_dir = dirname(old_save_file) new_save_file = join(old_dir, sanitize_name(name) + '.vxn')" ]
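if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module: the media
    # directory below is hypothetical and must exist for scan() to find
    # anything; save() writes a .vxn file at p.save_file.
    p = Project(name='photos', path='/tmp/media',
                tags=[TagInfo(name='rating', type='int')])
    p.scan()
    for fname, rpath in p.search('rating:>3'):
        print(fname, rpath)
    p.save()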
[ "try: value = path_get(value, obj_path) except (KeyError, IndexError, TypeError) as", "'<NAME>' __copyright__ = 'Copyright © 2021 <NAME>' __license__ = 'MIT'", "__license__ = 'MIT' __all__ = () import prance.util.url as _url", "prance.util.path import path_get try: value = path_get(value, obj_path) except (KeyError,", "= 'MIT' __all__ = () import prance.util.url as _url def", "made. If a URL is given, it is used as", "translator.\"\"\" __author__ = '<NAME>' __copyright__ = 'Copyright © 2021 <NAME>'", "whole document, adding the referenced object from external files to", "'_' + '_'.join(item_path[1:]) def _local_ref(path): url = '#/' + '/'.join(path)", "# In this inner parser's specification, we can now look", "specs, url): \"\"\" Construct a JSON reference translator. The translated", "except (KeyError, IndexError, TypeError) as ex: raise _url.ResolutionError('Cannot resolve reference", "else: # Reference to a non-root document. ref_key = _reference_key(ref_url,", "obj_path) except (KeyError, IndexError, TypeError) as ex: raise _url.ResolutionError('Cannot resolve", "# to read and parse it, of course. contents =", "url): \"\"\" Construct a JSON reference translator. The translated specs", "some time for the public API to be stabilized. class", "a JSON reference translator.\"\"\" __author__ = '<NAME>' __copyright__ = 'Copyright", "the translation. Traverses over the whole document, adding the referenced", "as ex: raise _url.ResolutionError('Cannot resolve reference \"%s\": %s' % (ref_url.geturl(),", "over the whole document, adding the referenced object from external", "resource. :param tuple recursions: A recursion stack for resolving references.", "from # creating a new resolver for this url. self.__reference_cache[url_key]", "{} if 'schemas' not in self.specs['components']: self.specs['components'].update({'schemas': {}}) self.specs['components']['schemas'].update(self.__collected_references) def", "value = self._translate_partial(ref_url, value) # That's it! return value def", "partial): changes = dict(tuple(self._translating_iterator(base_url, partial, ()))) paths = sorted(changes.keys(), key", "= path_get(value, obj_path) except (KeyError, IndexError, TypeError) as ex: raise", "# In order to start dereferencing anything in the referenced", "= len) from prance.util.path import path_set for path in paths:", "\"\"\" Resolve JSON pointers/references in a spec by translation. References", "# Now resolve partial specs value = self._translate_partial(ref_url, value) #", "for the public API to be stabilized. class _RefTranslator: \"\"\"", "resolver for this url. self.__reference_cache[url_key] = self.specs else: self.url =", "= '#/' + '/'.join(path) return {'$ref': url} # Underscored to", "_url.ResolutionError('Cannot resolve reference \"%s\": %s' % (ref_url.geturl(), str(ex))) # Deep", "to create recursive structures import copy value = copy.deepcopy(value) #", "the absolute URL of relative file references. :param dict specs:", "ref_url: The URL at which the reference is located. :param", "prance.util.path import path_set for path in paths: value = changes[path]", "/components/schemas object of the root document, while being translated to", "# If we have a url, we want to add", "prance.util.url as _url def _reference_key(ref_url, item_path): \"\"\" Return a portion", "the /components/schemas object of the root document, while being translated", "to point to the the new object locations. \"\"\" def", "the public API to be stabilized. 
class _RefTranslator: \"\"\" Resolve", "a URL is given, it is used as a base", "ref_path = ['components', 'schemas', ref_key] ref_obj = _local_ref(ref_path) yield full_path,", "the reference cache # - that creates a reference loop,", "self.specs) # Add collected references to the root document. if", "resolve reference \"%s\": %s' % (ref_url.geturl(), str(ex))) # Deep copy", "If a URL is given, it is used as a", "dict(tuple(self._translating_iterator(base_url, partial, ()))) paths = sorted(changes.keys(), key = len) from", "resolve partial specs value = self._translate_partial(ref_url, value) # That's it!", "IndexError, TypeError) as ex: raise _url.ResolutionError('Cannot resolve reference \"%s\": %s'", "a JSON reference translator. The translated specs are in the", "ref_string) full_path = path + item_path if ref_url.path == self.url.path:", "= path + item_path if ref_url.path == self.url.path: # Reference", "we have # to read and parse it, of course.", "base for calculating the absolute URL of relative file references.", "specs are in the `specs` member after a call to", "copy value; we don't want to create recursive structures import", "\"\"\" return ref_url.path.split('/')[-1] + '_' + '_'.join(item_path[1:]) def _local_ref(path): url", ":param str url: [optional] The URL to base relative references", "()))) paths = sorted(changes.keys(), key = len) from prance.util.path import", "the dereferenced value, with all internal references resolved. \"\"\" #", "to allow some time for the public API to be", "recursions: A recursion stack for resolving references. :return: A copy", "_url def _reference_key(ref_url, item_path): \"\"\" Return a portion of the", "list obj_path: The object path within the URL resource. :param", "to `translate_references` has been made. If a URL is given,", "self.__collected_references[ref_key] = None ref_value = self._dereference(ref_url, obj_path) self.__collected_references[ref_key] = ref_value", "non-root document. ref_key = _reference_key(ref_url, obj_path) if ref_key not in", "import prance.util.url as _url def _reference_key(ref_url, item_path): \"\"\" Return a", "= ['components', 'schemas', ref_key] ref_obj = _local_ref(ref_path) yield full_path, ref_obj", "# Deep copy value; we don't want to create recursive", "document. if self.__collected_references: if 'components' not in self.specs: self.specs['components'] =", "+ item_path if ref_url.path == self.url.path: # Reference to the", "if len(obj_path) != 0: from prance.util.path import path_get try: value", "'Copyright © 2021 <NAME>' __license__ = 'MIT' __all__ = ()", "if 'components' not in self.specs: self.specs['components'] = {} if 'schemas'", "changes[path] if len(path) == 0: partial = value else: path_set(partial,", "# Reference to the root document. ref_path = obj_path else:", "been made. If a URL is given, it is used", "value) # That's it! return value def _translate_partial(self, base_url, partial):", "loop, but prevents child resolvers from # creating a new", "self.__collected_references: if 'components' not in self.specs: self.specs['components'] = {} if", "document. ref_path = obj_path else: # Reference to a non-root", "ref_url.path == self.url.path: # Reference to the root document. 
ref_path", "References to objects in other files are copied to the", "None ref_value = self._dereference(ref_url, obj_path) self.__collected_references[ref_key] = ref_value ref_path =", "it is used as a base for calculating the absolute", "adding the referenced object from external files to the /components/schemas", "self.specs['components'].update({'schemas': {}}) self.specs['components']['schemas'].update(self.__collected_references) def _dereference(self, ref_url, obj_path): \"\"\" Dereference the", "return partial def _translating_iterator(self, base_url, partial, path): from prance.util.iterators import", "_url.split_url_reference(base_url, ref_string) full_path = path + item_path if ref_url.path ==", "ref_value = self._dereference(ref_url, obj_path) self.__collected_references[ref_key] = ref_value ref_path = ['components',", "Add collected references to the root document. if self.__collected_references: if", "to translate any references. :param str url: [optional] The URL", "A recursion stack for resolving references. :return: A copy of", "ex: raise _url.ResolutionError('Cannot resolve reference \"%s\": %s' % (ref_url.geturl(), str(ex)))", "That's it! return value def _translate_partial(self, base_url, partial): changes =", "resolving references. :return: A copy of the dereferenced value, with", "= (_url.urlresource(self.url), self.__strict) # If we have a url, we", "URL resource. :param tuple recursions: A recursion stack for resolving", "member after a call to `translate_references` has been made. If", "paths = sorted(changes.keys(), key = len) from prance.util.path import path_set", "references to the root document. if self.__collected_references: if 'components' not", "= () import prance.util.url as _url def _reference_key(ref_url, item_path): \"\"\"", "return ref_url.path.split('/')[-1] + '_' + '_'.join(item_path[1:]) def _local_ref(path): url =", "= None ref_value = self._dereference(ref_url, obj_path) self.__collected_references[ref_key] = ref_value ref_path", "path in paths: value = changes[path] if len(path) == 0:", "and parse it, of course. contents = _url.fetch_url(ref_url, self.__reference_cache, strict=self.__strict)", "_translate_partial(self, base_url, partial): changes = dict(tuple(self._translating_iterator(base_url, partial, ()))) paths =", "path): from prance.util.iterators import reference_iterator for _, ref_string, item_path in", "any references. :param str url: [optional] The URL to base", "contents if len(obj_path) != 0: from prance.util.path import path_get try:", "JSON reference translator.\"\"\" __author__ = '<NAME>' __copyright__ = 'Copyright ©", "create = True) return partial def _translating_iterator(self, base_url, partial, path):", "partial, ()))) paths = sorted(changes.keys(), key = len) from prance.util.path", "copy.deepcopy(value) # Now resolve partial specs value = self._translate_partial(ref_url, value)", "/components/schemas object in the root document and translating the references", ":param dict specs: The parsed specs in which to translate", "else: path_set(partial, list(path), value, create = True) return partial def", "def translate_references(self): \"\"\" Iterate over the specification document, performing the", "`specs` member after a call to `translate_references` has been made.", "_translating_iterator(self, base_url, partial, path): from prance.util.iterators import reference_iterator for _,", "base relative references on. \"\"\" import copy self.specs = copy.deepcopy(specs)", "# Add collected references to the root document. 
if self.__collected_references:", ":param mixed ref_url: The URL at which the reference is", "root document. if self.__collected_references: if 'components' not in self.specs: self.specs['components']", "we don't want to create recursive structures import copy value", "# Underscored to allow some time for the public API", "\"\"\" import copy self.specs = copy.deepcopy(specs) self.__strict = True self.__reference_cache", "portion of the dereferenced URL. format - ref-url_obj-path \"\"\" return", "value = changes[path] if len(path) == 0: partial = value", "ref_url, obj_path = _url.split_url_reference(base_url, ref_string) full_path = path + item_path", "import path_set for path in paths: value = changes[path] if", "import path_get try: value = path_get(value, obj_path) except (KeyError, IndexError,", "self.specs['components'] = {} if 'schemas' not in self.specs['components']: self.specs['components'].update({'schemas': {}})", "pointers/references in a spec by translation. References to objects in", "key = len) from prance.util.path import path_set for path in", "Traverses over the whole document, adding the referenced object from", "referenced URL, we have # to read and parse it,", "__all__ = () import prance.util.url as _url def _reference_key(ref_url, item_path):", "root document. ref_path = obj_path else: # Reference to a", "for resolving references. :return: A copy of the dereferenced value,", "== self.url.path: # Reference to the root document. ref_path =", "to a non-root document. ref_key = _reference_key(ref_url, obj_path) if ref_key", "(KeyError, IndexError, TypeError) as ex: raise _url.ResolutionError('Cannot resolve reference \"%s\":", "object in the root document and translating the references to", "on. \"\"\" import copy self.specs = copy.deepcopy(specs) self.__strict = True", "self.specs['components']: self.specs['components'].update({'schemas': {}}) self.specs['components']['schemas'].update(self.__collected_references) def _dereference(self, ref_url, obj_path): \"\"\" Dereference", "order to start dereferencing anything in the referenced URL, we", "_reference_key(ref_url, item_path): \"\"\" Return a portion of the dereferenced URL.", "references resolved. \"\"\" # In order to start dereferencing anything", "in the referenced URL, we have # to read and", "that creates a reference loop, but prevents child resolvers from", "len) from prance.util.path import path_set for path in paths: value", "translated specs are in the `specs` member after a call", "want to create recursive structures import copy value = copy.deepcopy(value)", "+ '/'.join(path) return {'$ref': url} # Underscored to allow some", "changes = dict(tuple(self._translating_iterator(base_url, partial, ()))) paths = sorted(changes.keys(), key =", "the specification document, performing the translation. Traverses over the whole", "of relative file references. :param dict specs: The parsed specs", "for this url. self.__reference_cache[url_key] = self.specs else: self.url = None", "of the root document, while being translated to point to", "the dereferenced URL. format - ref-url_obj-path \"\"\" return ref_url.path.split('/')[-1] +", "look for the referenced # object. value = contents if", "In order to start dereferencing anything in the referenced URL,", "root document, while being translated to point to the the", "the /components/schemas object in the root document and translating the", "\"\"\" Construct a JSON reference translator. 
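    # Illustrative sketch (not part of the original module): how collected
    # reference names and local pointers are formed by the two module-level
    # helpers above. The URL and item path are hypothetical:
    #
    #   from urllib.parse import urlsplit
    #   url = urlsplit('file:///specs/definitions.yaml')
    #   _reference_key(url, ('definitions', 'Pet'))
    #   # -> 'definitions.yaml_Pet'
    #   _local_ref(['components', 'schemas', 'definitions.yaml_Pet'])
    #   # -> {'$ref': '#/components/schemas/definitions.yaml_Pet'}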
    def _dereference(self, ref_url, obj_path):
        """
        Dereference the URL and object path.

        Returns the dereferenced object.

        :param mixed ref_url: The URL at which the reference is located.
        :param list obj_path: The object path within the URL resource.
        :param tuple recursions: A recursion stack for resolving references.
        :return: A copy of the dereferenced value, with all internal
            references resolved.
        """
        # In order to start dereferencing anything in the referenced URL, we
        # have to read and parse it, of course.
        contents = _url.fetch_url(ref_url, self.__reference_cache,
                                  strict=self.__strict)

        # In this inner parser's specification, we can now look for the
        # referenced object.
        value = contents
        if len(obj_path) != 0:
            from prance.util.path import path_get

            try:
                value = path_get(value, obj_path)
            except (KeyError, IndexError, TypeError) as ex:
                raise _url.ResolutionError('Cannot resolve reference "%s":'
                                           ' %s' % (ref_url.geturl(),
                                                    str(ex)))

        # Deep copy value; we don't want to create recursive structures
        import copy

        value = copy.deepcopy(value)

        # Now resolve partial specs
        value = self._translate_partial(ref_url, value)

        # That's it!
        return value

    def _translate_partial(self, base_url, partial):
        changes = dict(tuple(self._translating_iterator(base_url, partial,
                                                        ())))

        paths = sorted(changes.keys(), key = len)

        from prance.util.path import path_set

        for path in paths:
            value = changes[path]
            if len(path) == 0:
                partial = value
            else:
                path_set(partial, list(path), value, create = True)

        return partial

    def _translating_iterator(self, base_url, partial, path):
        from prance.util.iterators import reference_iterator

        for _, ref_string, item_path in reference_iterator(partial):
            ref_url, obj_path = _url.split_url_reference(base_url,
                                                         ref_string)
            full_path = path + item_path

            if ref_url.path == self.url.path:
                # Reference to the root document.
                ref_path = obj_path
            else:
                # Reference to a non-root document.
                ref_key = _reference_key(ref_url, obj_path)
                if ref_key not in self.__collected_references:
                    self.__collected_references[ref_key] = None
                    ref_value = self._dereference(ref_url, obj_path)
                    self.__collected_references[ref_key] = ref_value

                ref_path = ['components', 'schemas', ref_key]

            ref_obj = _local_ref(ref_path)
            yield full_path, ref_obj
\"\"\" def __init__(self, specs, url): \"\"\" Construct a", "def _reference_key(ref_url, item_path): \"\"\" Return a portion of the dereferenced", "anything in the referenced URL, we have # to read", "self.__strict) # If we have a url, we want to", "True) return partial def _translating_iterator(self, base_url, partial, path): from prance.util.iterators", "in the `specs` member after a call to `translate_references` has", ":return: A copy of the dereferenced value, with all internal", "obj_path = _url.split_url_reference(base_url, ref_string) full_path = path + item_path if", "obj_path) self.__collected_references[ref_key] = ref_value ref_path = ['components', 'schemas', ref_key] ref_obj", "and object path. Returns the dereferenced object. :param mixed ref_url:", "the root document and translating the references to the new", "dereferenced value, with all internal references resolved. \"\"\" # In", "tuple recursions: A recursion stack for resolving references. :return: A", "the root document, while being translated to point to the", "the `specs` member after a call to `translate_references` has been", "from prance.util.iterators import reference_iterator for _, ref_string, item_path in reference_iterator(partial):", "from prance.util.path import path_set for path in paths: value =", "= _url.split_url_reference(base_url, ref_string) full_path = path + item_path if ref_url.path", "self.url.path: # Reference to the root document. ref_path = obj_path", "is used as a base for calculating the absolute URL", "contains a JSON reference translator.\"\"\" __author__ = '<NAME>' __copyright__ =", "parsed specs in which to translate any references. :param str", "= self._translate_partial(self.url, self.specs) # Add collected references to the root", "which to translate any references. :param str url: [optional] The", "to start dereferencing anything in the referenced URL, we have", "# - that creates a reference loop, but prevents child", "call to `translate_references` has been made. If a URL is", "if ref_url.path == self.url.path: # Reference to the root document.", "read and parse it, of course. contents = _url.fetch_url(ref_url, self.__reference_cache,", "specs: The parsed specs in which to translate any references.", "specification document, performing the translation. Traverses over the whole document,", "document and translating the references to the new location. \"\"\"", "= {} if 'schemas' not in self.specs['components']: self.specs['components'].update({'schemas': {}}) self.specs['components']['schemas'].update(self.__collected_references)", "translator. The translated specs are in the `specs` member after", "location. \"\"\" self.specs = self._translate_partial(self.url, self.specs) # Add collected references", "translated to point to the the new object locations. \"\"\"", "value, with all internal references resolved. \"\"\" # In order", "__author__ = '<NAME>' __copyright__ = 'Copyright © 2021 <NAME>' __license__", "prance.util.iterators import reference_iterator for _, ref_string, item_path in reference_iterator(partial): ref_url,", "partial = value else: path_set(partial, list(path), value, create = True)", "import reference_iterator for _, ref_string, item_path in reference_iterator(partial): ref_url, obj_path", "The URL at which the reference is located. :param list", "the the new object locations. \"\"\" def __init__(self, specs, url):", "raise _url.ResolutionError('Cannot resolve reference \"%s\": %s' % (ref_url.geturl(), str(ex))) #", "translating the references to the new location. 
\"\"\" self.specs =", "stabilized. class _RefTranslator: \"\"\" Resolve JSON pointers/references in a spec", "base_url, partial, path): from prance.util.iterators import reference_iterator for _, ref_string,", "by translation. References to objects in other files are copied", "\"\"\" self.specs = self._translate_partial(self.url, self.specs) # Add collected references to", "def _dereference(self, ref_url, obj_path): \"\"\" Dereference the URL and object", "Underscored to allow some time for the public API to", "self.url = _url.absurl(url) url_key = (_url.urlresource(self.url), self.__strict) # If we", "copy value = copy.deepcopy(value) # Now resolve partial specs value", "reference translator.\"\"\" __author__ = '<NAME>' __copyright__ = 'Copyright © 2021", "be stabilized. class _RefTranslator: \"\"\" Resolve JSON pointers/references in a", "to base relative references on. \"\"\" import copy self.specs =", "calculating the absolute URL of relative file references. :param dict", "from prance.util.path import path_get try: value = path_get(value, obj_path) except", "= self.specs else: self.url = None def translate_references(self): \"\"\" Iterate", "True self.__reference_cache = {} self.__collected_references = {} if url: self.url", "= contents if len(obj_path) != 0: from prance.util.path import path_get", "A copy of the dereferenced value, with all internal references", "= copy.deepcopy(value) # Now resolve partial specs value = self._translate_partial(ref_url,", "'_'.join(item_path[1:]) def _local_ref(path): url = '#/' + '/'.join(path) return {'$ref':", "inner parser's specification, we can now look for the referenced", "allow some time for the public API to be stabilized.", "after a call to `translate_references` has been made. If a", "list(path), value, create = True) return partial def _translating_iterator(self, base_url,", "located. :param list obj_path: The object path within the URL", "# That's it! return value def _translate_partial(self, base_url, partial): changes", "have a url, we want to add ourselves to the", "None def translate_references(self): \"\"\" Iterate over the specification document, performing", "spec by translation. References to objects in other files are", "URL of relative file references. :param dict specs: The parsed", "url: [optional] The URL to base relative references on. \"\"\"", "class _RefTranslator: \"\"\" Resolve JSON pointers/references in a spec by", "reference cache # - that creates a reference loop, but", "Construct a JSON reference translator. The translated specs are in", "dereferenced URL. format - ref-url_obj-path \"\"\" return ref_url.path.split('/')[-1] + '_'", "_RefTranslator: \"\"\" Resolve JSON pointers/references in a spec by translation.", "url_key = (_url.urlresource(self.url), self.__strict) # If we have a url,", "full_path = path + item_path if ref_url.path == self.url.path: #", "# object. value = contents if len(obj_path) != 0: from", "= _url.absurl(url) url_key = (_url.urlresource(self.url), self.__strict) # If we have", "'MIT' __all__ = () import prance.util.url as _url def _reference_key(ref_url,", "stack for resolving references. :return: A copy of the dereferenced", "don't want to create recursive structures import copy value =", "def _translate_partial(self, base_url, partial): changes = dict(tuple(self._translating_iterator(base_url, partial, ()))) paths", "self.__collected_references[ref_key] = ref_value ref_path = ['components', 'schemas', ref_key] ref_obj =", "file references. 
:param dict specs: The parsed specs in which", "new resolver for this url. self.__reference_cache[url_key] = self.specs else: self.url", "which the reference is located. :param list obj_path: The object", "strict=self.__strict) # In this inner parser's specification, we can now", "in reference_iterator(partial): ref_url, obj_path = _url.split_url_reference(base_url, ref_string) full_path = path", "_dereference(self, ref_url, obj_path): \"\"\" Dereference the URL and object path.", "Return a portion of the dereferenced URL. format - ref-url_obj-path", "to the root document. ref_path = obj_path else: # Reference", "object from external files to the /components/schemas object in the", "for path in paths: value = changes[path] if len(path) ==", "want to add ourselves to the reference cache # -", "in the root document and translating the references to the", "self.__collected_references = {} if url: self.url = _url.absurl(url) url_key =", "(ref_url.geturl(), str(ex))) # Deep copy value; we don't want to", "create recursive structures import copy value = copy.deepcopy(value) # Now", "copied to the /components/schemas object of the root document, while", "new object locations. \"\"\" def __init__(self, specs, url): \"\"\" Construct", "it! return value def _translate_partial(self, base_url, partial): changes = dict(tuple(self._translating_iterator(base_url,", "we can now look for the referenced # object. value", "copy.deepcopy(specs) self.__strict = True self.__reference_cache = {} self.__collected_references = {}", "files to the /components/schemas object in the root document and", "external files to the /components/schemas object in the root document", "© 2021 <NAME>' __license__ = 'MIT' __all__ = () import", "translation. References to objects in other files are copied to", "is given, it is used as a base for calculating", "specs in which to translate any references. :param str url:", "item_path in reference_iterator(partial): ref_url, obj_path = _url.split_url_reference(base_url, ref_string) full_path =", "can now look for the referenced # object. value =", "<NAME>' __license__ = 'MIT' __all__ = () import prance.util.url as", "and translating the references to the new location. \"\"\" self.specs", "to the /components/schemas object of the root document, while being", "referenced object from external files to the /components/schemas object in", "dereferenced object. :param mixed ref_url: The URL at which the", "+ '_'.join(item_path[1:]) def _local_ref(path): url = '#/' + '/'.join(path) return", "parse it, of course. contents = _url.fetch_url(ref_url, self.__reference_cache, strict=self.__strict) #", "specification, we can now look for the referenced # object.", "time for the public API to be stabilized. class _RefTranslator:", "creates a reference loop, but prevents child resolvers from #", "[optional] The URL to base relative references on. \"\"\" import", "if self.__collected_references: if 'components' not in self.specs: self.specs['components'] = {}", "relative references on. \"\"\" import copy self.specs = copy.deepcopy(specs) self.__strict", "# Reference to a non-root document. ref_key = _reference_key(ref_url, obj_path)", "to read and parse it, of course. 
contents = _url.fetch_url(ref_url,", "self.specs['components']['schemas'].update(self.__collected_references) def _dereference(self, ref_url, obj_path): \"\"\" Dereference the URL and", "{} if url: self.url = _url.absurl(url) url_key = (_url.urlresource(self.url), self.__strict)", "base_url, partial): changes = dict(tuple(self._translating_iterator(base_url, partial, ()))) paths = sorted(changes.keys(),", "url} # Underscored to allow some time for the public", "over the specification document, performing the translation. Traverses over the", "self.__strict = True self.__reference_cache = {} self.__collected_references = {} if", "\"\"\"This submodule contains a JSON reference translator.\"\"\" __author__ = '<NAME>'", "the whole document, adding the referenced object from external files", "but prevents child resolvers from # creating a new resolver", "to the root document. if self.__collected_references: if 'components' not in", "absolute URL of relative file references. :param dict specs: The", "url. self.__reference_cache[url_key] = self.specs else: self.url = None def translate_references(self):", "ref_path = obj_path else: # Reference to a non-root document.", "not in self.specs: self.specs['components'] = {} if 'schemas' not in", "'components' not in self.specs: self.specs['components'] = {} if 'schemas' not", "mixed ref_url: The URL at which the reference is located.", "object of the root document, while being translated to point", "to the new location. \"\"\" self.specs = self._translate_partial(self.url, self.specs) #", "TypeError) as ex: raise _url.ResolutionError('Cannot resolve reference \"%s\": %s' %", "in self.__collected_references: self.__collected_references[ref_key] = None ref_value = self._dereference(ref_url, obj_path) self.__collected_references[ref_key]", "sorted(changes.keys(), key = len) from prance.util.path import path_set for path", "!= 0: from prance.util.path import path_get try: value = path_get(value,", "= True) return partial def _translating_iterator(self, base_url, partial, path): from", "= True self.__reference_cache = {} self.__collected_references = {} if url:", "have # to read and parse it, of course. contents", "if len(path) == 0: partial = value else: path_set(partial, list(path),", "files are copied to the /components/schemas object of the root", "document. ref_key = _reference_key(ref_url, obj_path) if ref_key not in self.__collected_references:", "paths: value = changes[path] if len(path) == 0: partial =", "document, while being translated to point to the the new", "creating a new resolver for this url. self.__reference_cache[url_key] = self.specs", "API to be stabilized. class _RefTranslator: \"\"\" Resolve JSON pointers/references", "ref_string, item_path in reference_iterator(partial): ref_url, obj_path = _url.split_url_reference(base_url, ref_string) full_path", "= dict(tuple(self._translating_iterator(base_url, partial, ()))) paths = sorted(changes.keys(), key = len)", "= obj_path else: # Reference to a non-root document. ref_key", "dict specs: The parsed specs in which to translate any", "= '<NAME>' __copyright__ = 'Copyright © 2021 <NAME>' __license__ =", "obj_path): \"\"\" Dereference the URL and object path. Returns the", "reference_iterator(partial): ref_url, obj_path = _url.split_url_reference(base_url, ref_string) full_path = path +", "a spec by translation. 
References to objects in other files", "not in self.specs['components']: self.specs['components'].update({'schemas': {}}) self.specs['components']['schemas'].update(self.__collected_references) def _dereference(self, ref_url, obj_path):", "value def _translate_partial(self, base_url, partial): changes = dict(tuple(self._translating_iterator(base_url, partial, ())))", "URL to base relative references on. \"\"\" import copy self.specs", "partial, path): from prance.util.iterators import reference_iterator for _, ref_string, item_path", "The translated specs are in the `specs` member after a", "the root document. ref_path = obj_path else: # Reference to", "path within the URL resource. :param tuple recursions: A recursion", "from external files to the /components/schemas object in the root", "start dereferencing anything in the referenced URL, we have #", "the referenced # object. value = contents if len(obj_path) !=", "_reference_key(ref_url, obj_path) if ref_key not in self.__collected_references: self.__collected_references[ref_key] = None", "# creating a new resolver for this url. self.__reference_cache[url_key] =", "it, of course. contents = _url.fetch_url(ref_url, self.__reference_cache, strict=self.__strict) # In", "recursive structures import copy value = copy.deepcopy(value) # Now resolve", "structures import copy value = copy.deepcopy(value) # Now resolve partial", "to the reference cache # - that creates a reference", "parser's specification, we can now look for the referenced #", "item_path): \"\"\" Return a portion of the dereferenced URL. format", "__copyright__ = 'Copyright © 2021 <NAME>' __license__ = 'MIT' __all__", "document, performing the translation. Traverses over the whole document, adding", "ourselves to the reference cache # - that creates a", "url = '#/' + '/'.join(path) return {'$ref': url} # Underscored", "obj_path: The object path within the URL resource. :param tuple", "_url.absurl(url) url_key = (_url.urlresource(self.url), self.__strict) # If we have a", "self.__reference_cache[url_key] = self.specs else: self.url = None def translate_references(self): \"\"\"", "\"\"\" Iterate over the specification document, performing the translation. Traverses", "= value else: path_set(partial, list(path), value, create = True) return", "self.__collected_references: self.__collected_references[ref_key] = None ref_value = self._dereference(ref_url, obj_path) self.__collected_references[ref_key] =", "if 'schemas' not in self.specs['components']: self.specs['components'].update({'schemas': {}}) self.specs['components']['schemas'].update(self.__collected_references) def _dereference(self,", "a base for calculating the absolute URL of relative file", "len(path) == 0: partial = value else: path_set(partial, list(path), value,", "specs value = self._translate_partial(ref_url, value) # That's it! return value", "in self.specs['components']: self.specs['components'].update({'schemas': {}}) self.specs['components']['schemas'].update(self.__collected_references) def _dereference(self, ref_url, obj_path): \"\"\"", "ref_url, obj_path): \"\"\" Dereference the URL and object path. 
Returns", "= {} if url: self.url = _url.absurl(url) url_key = (_url.urlresource(self.url),", "'#/' + '/'.join(path) return {'$ref': url} # Underscored to allow", "_, ref_string, item_path in reference_iterator(partial): ref_url, obj_path = _url.split_url_reference(base_url, ref_string)", "partial def _translating_iterator(self, base_url, partial, path): from prance.util.iterators import reference_iterator", "for calculating the absolute URL of relative file references. :param", "URL. format - ref-url_obj-path \"\"\" return ref_url.path.split('/')[-1] + '_' +", "URL is given, it is used as a base for", "\"\"\" Dereference the URL and object path. Returns the dereferenced", "a reference loop, but prevents child resolvers from # creating", "to the the new object locations. \"\"\" def __init__(self, specs,", "of course. contents = _url.fetch_url(ref_url, self.__reference_cache, strict=self.__strict) # In this", "document, adding the referenced object from external files to the", "URL at which the reference is located. :param list obj_path:", "{'$ref': url} # Underscored to allow some time for the", "references on. \"\"\" import copy self.specs = copy.deepcopy(specs) self.__strict =", "as _url def _reference_key(ref_url, item_path): \"\"\" Return a portion of", "self.specs: self.specs['components'] = {} if 'schemas' not in self.specs['components']: self.specs['components'].update({'schemas':", "URL, we have # to read and parse it, of", ":param tuple recursions: A recursion stack for resolving references. :return:", "public API to be stabilized. class _RefTranslator: \"\"\" Resolve JSON", "within the URL resource. :param tuple recursions: A recursion stack", "the referenced URL, we have # to read and parse", "import copy self.specs = copy.deepcopy(specs) self.__strict = True self.__reference_cache =", "The parsed specs in which to translate any references. :param", "to the /components/schemas object in the root document and translating", "value = contents if len(obj_path) != 0: from prance.util.path import", "submodule contains a JSON reference translator.\"\"\" __author__ = '<NAME>' __copyright__", "being translated to point to the the new object locations.", "copy of the dereferenced value, with all internal references resolved.", "format - ref-url_obj-path \"\"\" return ref_url.path.split('/')[-1] + '_' + '_'.join(item_path[1:])", "url: self.url = _url.absurl(url) url_key = (_url.urlresource(self.url), self.__strict) # If", "JSON pointers/references in a spec by translation. References to objects", "- ref-url_obj-path \"\"\" return ref_url.path.split('/')[-1] + '_' + '_'.join(item_path[1:]) def", "reference is located. :param list obj_path: The object path within", "Iterate over the specification document, performing the translation. Traverses over", "self.specs = copy.deepcopy(specs) self.__strict = True self.__reference_cache = {} self.__collected_references", "ref-url_obj-path \"\"\" return ref_url.path.split('/')[-1] + '_' + '_'.join(item_path[1:]) def _local_ref(path):", "resolvers from # creating a new resolver for this url." ]
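
Each bracketed row in this dump is one value of the ngram column: a list of overlapping, word-level shingles (ten whitespace-delimited tokens each) cut from a single Python source file. The row ending above comes from a JSON reference translator module; the rows that follow come from a stream-parser unit test, an OpenQL regression-test module, and a Modbus address-change script. As an illustration only, the sketch below shows one way such fixed-width word n-grams can be produced; the 10-word window, the plain whitespace tokenization, and the name word_ngrams are assumptions made for this example, not details taken from this dataset's actual preprocessing.

# Illustrative sketch (assumed preprocessing, not this dataset's own code):
# split a text on whitespace and emit every run of n consecutive words.
def word_ngrams(text, n=10):
    tokens = text.split()  # plain whitespace tokenization (assumption)
    if len(tokens) <= n:
        return [' '.join(tokens)] if tokens else []
    return [' '.join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]

# Example usage: a short snippet yields a row shaped like the ones in this dump.
snippet = "from io import StringIO from unittest import TestCase"
print(word_ngrams(snippet, n=5))

Consecutive shingles produced this way share n - 1 words, which is why each row reads as the same code repeated in slightly shifted fragments.
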
[ "r = cs.next() self.assertFalse(r) self.assertTrue(r.err()) r = cs.next() self.assertFalse(r) cs.back()", "= cs.next() self.assertFalse(r) self.assertTrue(r.err()) r = cs.next() self.assertFalse(r) cs.back() r", "cs = Characters(StringIO(s)) ch = cs.peek().ok() self.assertEqual(ch, '1') ch =", "= cs.peek().ok() self.assertEqual(ch, '1') ch = cs.next().ok() self.assertEqual(ch, '1') ch", "ch = cs.peek().ok() self.assertEqual(ch, '1') ch = cs.next().ok() self.assertEqual(ch, '1')", "= cs.next().ok() self.assertEqual(ch, '1') ch = cs.next().ok() self.assertEqual(ch, '2') r", "self.assertEqual(ch, '2') r = cs.next() self.assertFalse(r) self.assertTrue(r.err()) r = cs.next()", "self.assertFalse(r) self.assertTrue(r.err()) r = cs.next() self.assertFalse(r) cs.back() r = cs.next()", "self.assertTrue(r.err()) r = cs.next() self.assertFalse(r) cs.back() r = cs.next() self.assertTrue(r)", "= cs.next() self.assertTrue(r) self.assertEqual(r.ok(), '2') cs.back(2) r = cs.next() self.assertTrue(r)", "self.assertEqual(ch, '1') ch = cs.next().ok() self.assertEqual(ch, '2') r = cs.next()", "self.assertFalse(r) cs.back() r = cs.next() self.assertTrue(r) self.assertEqual(r.ok(), '2') cs.back(2) r", "import * class StreamTestCase(TestCase): def test(self): s = '12' cs", "'12' cs = Characters(StringIO(s)) ch = cs.peek().ok() self.assertEqual(ch, '1') ch", "= cs.next().ok() self.assertEqual(ch, '2') r = cs.next() self.assertFalse(r) self.assertTrue(r.err()) r", "cs.next() self.assertFalse(r) cs.back() r = cs.next() self.assertTrue(r) self.assertEqual(r.ok(), '2') cs.back(2)", "unittest import TestCase from dropSQL.parser.streams import * class StreamTestCase(TestCase): def", "self.assertEqual(ch, '1') ch = cs.peek().ok() self.assertEqual(ch, '1') ch = cs.next().ok()", "TestCase from dropSQL.parser.streams import * class StreamTestCase(TestCase): def test(self): s", "'1') ch = cs.next().ok() self.assertEqual(ch, '1') ch = cs.next().ok() self.assertEqual(ch,", "ch = cs.next().ok() self.assertEqual(ch, '1') ch = cs.next().ok() self.assertEqual(ch, '2')", "cs.next().ok() self.assertEqual(ch, '2') r = cs.next() self.assertFalse(r) self.assertTrue(r.err()) r =", "* class StreamTestCase(TestCase): def test(self): s = '12' cs =", "class StreamTestCase(TestCase): def test(self): s = '12' cs = Characters(StringIO(s))", "def test(self): s = '12' cs = Characters(StringIO(s)) ch =", "r = cs.next() self.assertTrue(r) self.assertEqual(r.ok(), '2') cs.back(2) r = cs.next()", "'2') r = cs.next() self.assertFalse(r) self.assertTrue(r.err()) r = cs.next() self.assertFalse(r)", "from dropSQL.parser.streams import * class StreamTestCase(TestCase): def test(self): s =", "s = '12' cs = Characters(StringIO(s)) ch = cs.peek().ok() self.assertEqual(ch,", "import StringIO from unittest import TestCase from dropSQL.parser.streams import *", "self.assertTrue(r) self.assertEqual(r.ok(), '2') cs.back(2) r = cs.next() self.assertTrue(r) self.assertEqual(r.ok(), '1')", "ch = cs.peek().ok() self.assertEqual(ch, '1') ch = cs.peek().ok() self.assertEqual(ch, '1')", "from unittest import TestCase from dropSQL.parser.streams import * class StreamTestCase(TestCase):", "'1') ch = cs.peek().ok() self.assertEqual(ch, '1') ch = cs.next().ok() self.assertEqual(ch,", "dropSQL.parser.streams import * class StreamTestCase(TestCase): def test(self): s = '12'", "= '12' cs = Characters(StringIO(s)) ch = cs.peek().ok() self.assertEqual(ch, '1')", "StringIO from unittest import TestCase from dropSQL.parser.streams import * class", "= Characters(StringIO(s)) ch 
= cs.peek().ok() self.assertEqual(ch, '1') ch = cs.peek().ok()", "Characters(StringIO(s)) ch = cs.peek().ok() self.assertEqual(ch, '1') ch = cs.peek().ok() self.assertEqual(ch,", "'1') ch = cs.next().ok() self.assertEqual(ch, '2') r = cs.next() self.assertFalse(r)", "cs.peek().ok() self.assertEqual(ch, '1') ch = cs.peek().ok() self.assertEqual(ch, '1') ch =", "= cs.peek().ok() self.assertEqual(ch, '1') ch = cs.peek().ok() self.assertEqual(ch, '1') ch", "from io import StringIO from unittest import TestCase from dropSQL.parser.streams", "cs.next() self.assertFalse(r) self.assertTrue(r.err()) r = cs.next() self.assertFalse(r) cs.back() r =", "StreamTestCase(TestCase): def test(self): s = '12' cs = Characters(StringIO(s)) ch", "cs.next() self.assertTrue(r) self.assertEqual(r.ok(), '2') cs.back(2) r = cs.next() self.assertTrue(r) self.assertEqual(r.ok(),", "self.assertEqual(ch, '1') ch = cs.next().ok() self.assertEqual(ch, '1') ch = cs.next().ok()", "test(self): s = '12' cs = Characters(StringIO(s)) ch = cs.peek().ok()", "cs.peek().ok() self.assertEqual(ch, '1') ch = cs.next().ok() self.assertEqual(ch, '1') ch =", "import TestCase from dropSQL.parser.streams import * class StreamTestCase(TestCase): def test(self):", "cs.next().ok() self.assertEqual(ch, '1') ch = cs.next().ok() self.assertEqual(ch, '2') r =", "ch = cs.next().ok() self.assertEqual(ch, '2') r = cs.next() self.assertFalse(r) self.assertTrue(r.err())", "= cs.next() self.assertFalse(r) cs.back() r = cs.next() self.assertTrue(r) self.assertEqual(r.ok(), '2')", "cs.back() r = cs.next() self.assertTrue(r) self.assertEqual(r.ok(), '2') cs.back(2) r =", "io import StringIO from unittest import TestCase from dropSQL.parser.streams import", "r = cs.next() self.assertFalse(r) cs.back() r = cs.next() self.assertTrue(r) self.assertEqual(r.ok()," ]
[ "more likely, it also had to do with the classical", "ql.CReg(2) k.classical(rs1, ql.Operation(3)) k.classical(rs1, ql.Operation(4)) k.classical(rd, ql.Operation(rs1, '+', rs2)) p.add_kernel(k)", "compiles were generating different results or in the best #", "for that reason. def test_stateful_behavior(self): ql.set_option('optimize', 'no') ql.set_option('scheduler', 'ALAP') platform", "self.setUpClass() QISA_fn_i = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') os.rename(QISA_fn,QISA_fn_i) for i in range(NCOMPILES-1):", "range(NCOMPILES-1): QISA_fn_1 = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') QISA_fn_2 = os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm') self.assertTrue(", "def test_typecast(self): sweep_points = [1,2] num_circuits = 1 num_qubits =", "p = ql.Program(\"statelessProgram\", platform, nqubits, nregs) p.set_sweep_points(sweep_points) k = ql.Kernel(\"aKernel\",", "= ql.CReg(0) rs1 = ql.CReg(1) rs2 = ql.CReg(2) k.classical(rs1, ql.Operation(3))", "# in_fn = 'test_' + name + '.cq' # out_fn", "import filecmp import unittest import numpy as np from openql", "nregs) p.set_sweep_points(sweep_points) k = ql.Kernel(\"aKernel\", platform, nqubits, nregs) k.prepz(0) k.gate('rx180',", "sweep_points = [1] nqubits = 3 nregs = 3 p", "unittest import numpy as np from openql import openql as", "'ALAP') platform = ql.Platform(\"myPlatform\", 'cc_light') sweep_points = [1] nqubits =", "test works. # When clear, enable it again. # Now", "# def test_empty_infinite_loop(self): # name = 'empty_infinite_loop' # in_fn =", "= 'test_' + name + '.cq' # out_fn = 'test_output/'", "program p.add_kernel(k) # relates to https://github.com/QE-Lab/OpenQL/issues/171 # various runs of", "hardcoded now for that reason. def test_stateful_behavior(self): ql.set_option('optimize', 'no') ql.set_option('scheduler',", "import unittest import numpy as np from openql import openql", "'yes') ql.set_option('log_level', 'LOG_WARNING') # @unittest.expectedFailure # @unittest.skip def test_typecast(self): sweep_points", "the classical register allocator # depending on stuff like Python's", "test_typecast(self): sweep_points = [1,2] num_circuits = 1 num_qubits = 2", "file_compare curdir = os.path.dirname(os.path.realpath(__file__)) output_dir = os.path.join(curdir, 'test_output') class Test_bugs(unittest.TestCase):", "runs is same # JvS: more likely, it also had", "+ '_out.cq' # ql.initialize() # #ql.set_option('log_level', 'LOG_DEBUG') # ql.compile(in_fn) #", "ql.Operation(3)) k.classical(rs1, ql.Operation(4)) k.classical(rd, ql.Operation(rs1, '+', rs2)) p.add_kernel(k) NCOMPILES=50 QISA_fn", "# out_fn = 'test_output/' + name + '_out.cq' # gold_fn", "= os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') os.rename(QISA_fn,QISA_fn_i) for i in range(NCOMPILES-1): QISA_fn_1 =", "# ql.compile(in_fn) # self.assertTrue(file_compare(out_fn, gold_fn)) if __name__ == '__main__': unittest.main()", "k = ql.Kernel(\"aKernel\", platform, nqubits, nregs) k.prepz(0) k.gate('rx180', [0]) k.measure(0)", "ql.Kernel(\"aKernel\", platform, nqubits, nregs) k.prepz(0) k.gate('rx180', [0]) k.measure(0) rd =", "import os import filecmp import unittest import numpy as np", "free a register. # The register numbers have to be", "from openql import openql as ql from utils import file_compare", "so it is disabled. 
# def test_empty_infinite_loop(self): # name =", "setUp(self): ql.initialize() ql.set_option('output_dir', output_dir) ql.set_option('use_default_gates', 'yes') ql.set_option('log_level', 'LOG_WARNING') # @unittest.expectedFailure", "@classmethod def setUp(self): ql.initialize() ql.set_option('output_dir', output_dir) ql.set_option('use_default_gates', 'yes') ql.set_option('log_level', 'LOG_WARNING')", "Test_bugs(unittest.TestCase): @classmethod def setUp(self): ql.initialize() ql.set_option('output_dir', output_dir) ql.set_option('use_default_gates', 'yes') ql.set_option('log_level',", "k = ql.Kernel('kernel1', platf, num_qubits) qubit = 1 k.identity(np.int(qubit)) k.identity(np.int32(qubit))", "'test_output/' + name + '_out.cq' # gold_fn = 'golden/' +", "or in the best # case strange errors. So multiple", "clear how to repair, so it is disabled. # def", "multiple (NCOMPILES) runs of compile are executed # to make", "openql import openql as ql from utils import file_compare curdir", "register allocator # depending on stuff like Python's garbage collection", "'cc_light') sweep_points = [1] nqubits = 3 nregs = 3", "3 p = ql.Program(\"statelessProgram\", platform, nqubits, nregs) p.set_sweep_points(sweep_points) k =", "ql.Platform(\"starmon\", 'cc_light') p = ql.Program('test_bug', platf, num_qubits) p.set_sweep_points(sweep_points) k =", "So multiple (NCOMPILES) runs of compile are executed # to", "[1,2] num_circuits = 1 num_qubits = 2 platf = ql.Platform(\"starmon\",", "@unittest.expectedFailure # @unittest.skip def test_typecast(self): sweep_points = [1,2] num_circuits =", "= [1] nqubits = 3 nregs = 3 p =", "there is no error and output generated in all these", "stuff like Python's garbage collection to free a register. #", "'cc_light') p = ql.Program('test_bug', platf, num_qubits) p.set_sweep_points(sweep_points) k = ql.Kernel('kernel1',", "like Python's garbage collection to free a register. # The", "num_qubits) qubit = 1 k.identity(np.int(qubit)) k.identity(np.int32(qubit)) k.identity(np.int64(qubit)) k.identity(np.uint(qubit)) k.identity(np.uint32(qubit)) k.identity(np.uint64(qubit))", "same # JvS: more likely, it also had to do", "is disabled. # def test_empty_infinite_loop(self): # name = 'empty_infinite_loop' #", "how to repair, so it is disabled. # def test_empty_infinite_loop(self):", "# gold_fn = 'golden/' + name + '_out.cq' # ql.initialize()", "best # case strange errors. So multiple (NCOMPILES) runs of", "ql.initialize() # #ql.set_option('log_level', 'LOG_DEBUG') # ql.compile(in_fn) # self.assertTrue(file_compare(out_fn, gold_fn)) if", "sweep_points = [1,2] num_circuits = 1 num_qubits = 2 platf", "to the program p.add_kernel(k) # relates to https://github.com/QE-Lab/OpenQL/issues/171 # various", "i in range(NCOMPILES-1): QISA_fn_1 = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') QISA_fn_2 = os.path.join(output_dir,", "k.gate('rx180', [0]) k.measure(0) rd = ql.CReg(0) rs1 = ql.CReg(1) rs2", "os.path.dirname(os.path.realpath(__file__)) output_dir = os.path.join(curdir, 'test_output') class Test_bugs(unittest.TestCase): @classmethod def setUp(self):", "allocator # depending on stuff like Python's garbage collection to", "rs1 = ql.CReg(1) rs2 = ql.CReg(2) k.classical(rs1, ql.Operation(3)) k.classical(rs1, ql.Operation(4))", "Unclear how this test works. # When clear, enable it", "p.name+'_'+str(i+1)+'_last.qasm') self.assertTrue( file_compare(QISA_fn_1, QISA_fn_2)) # Unclear how this test works.", "works. # When clear, enable it again. # Now it", "case strange errors. 
So multiple (NCOMPILES) runs of compile are", "garbage collection to free a register. # The register numbers", "# ql.initialize() # #ql.set_option('log_level', 'LOG_DEBUG') # ql.compile(in_fn) # self.assertTrue(file_compare(out_fn, gold_fn))", "relates to https://github.com/QE-Lab/OpenQL/issues/171 # various runs of compiles were generating", "compile are executed # to make sure there is no", "ql.CReg(0) rs1 = ql.CReg(1) rs2 = ql.CReg(2) k.classical(rs1, ql.Operation(3)) k.classical(rs1,", "executed # to make sure there is no error and", "+ name + '_out.cq' # gold_fn = 'golden/' + name", "register numbers have to be hardcoded now for that reason.", "= 'test_output/' + name + '_out.cq' # gold_fn = 'golden/'", "the best # case strange errors. So multiple (NCOMPILES) runs", "ql.set_option('scheduler', 'ALAP') platform = ql.Platform(\"myPlatform\", 'cc_light') sweep_points = [1] nqubits", "name + '.cq' # out_fn = 'test_output/' + name +", "= 3 nregs = 3 p = ql.Program(\"statelessProgram\", platform, nqubits,", "# add the kernel to the program p.add_kernel(k) # relates", "out_fn = 'test_output/' + name + '_out.cq' # gold_fn =", "os.path.join(curdir, 'test_output') class Test_bugs(unittest.TestCase): @classmethod def setUp(self): ql.initialize() ql.set_option('output_dir', output_dir)", "<reponame>mmibrah2/OpenQL import os import filecmp import unittest import numpy as", "+ '.cq' # out_fn = 'test_output/' + name + '_out.cq'", "QISA_fn = os.path.join(output_dir, p.name+'_last.qasm') for i in range(NCOMPILES): p.compile() self.setUpClass()", "rs2)) p.add_kernel(k) NCOMPILES=50 QISA_fn = os.path.join(output_dir, p.name+'_last.qasm') for i in", "https://github.com/QE-Lab/OpenQL/issues/171 # various runs of compiles were generating different results", "it also had to do with the classical register allocator", "# to make sure there is no error and output", "all these runs is same # JvS: more likely, it", "platform, nqubits, nregs) k.prepz(0) k.gate('rx180', [0]) k.measure(0) rd = ql.CReg(0)", "k.identity(np.int(qubit)) k.identity(np.int32(qubit)) k.identity(np.int64(qubit)) k.identity(np.uint(qubit)) k.identity(np.uint32(qubit)) k.identity(np.uint64(qubit)) # add the kernel", "p.name+'_last.qasm') for i in range(NCOMPILES): p.compile() self.setUpClass() QISA_fn_i = os.path.join(output_dir,", "test_stateful_behavior(self): ql.set_option('optimize', 'no') ql.set_option('scheduler', 'ALAP') platform = ql.Platform(\"myPlatform\", 'cc_light') sweep_points", "platf = ql.Platform(\"starmon\", 'cc_light') p = ql.Program('test_bug', platf, num_qubits) p.set_sweep_points(sweep_points)", "the program p.add_kernel(k) # relates to https://github.com/QE-Lab/OpenQL/issues/171 # various runs", "'empty_infinite_loop' # in_fn = 'test_' + name + '.cq' #", "in range(NCOMPILES): p.compile() self.setUpClass() QISA_fn_i = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') os.rename(QISA_fn,QISA_fn_i) for", "p.name+'_'+str(i)+'_last.qasm') QISA_fn_2 = os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm') self.assertTrue( file_compare(QISA_fn_1, QISA_fn_2)) # Unclear", "were generating different results or in the best # case", "results or in the best # case strange errors. 
So", "p.add_kernel(k) NCOMPILES=50 QISA_fn = os.path.join(output_dir, p.name+'_last.qasm') for i in range(NCOMPILES):", "make sure there is no error and output generated in", "rs2 = ql.CReg(2) k.classical(rs1, ql.Operation(3)) k.classical(rs1, ql.Operation(4)) k.classical(rd, ql.Operation(rs1, '+',", "import file_compare curdir = os.path.dirname(os.path.realpath(__file__)) output_dir = os.path.join(curdir, 'test_output') class", "strange errors. So multiple (NCOMPILES) runs of compile are executed", "QISA_fn_2)) # Unclear how this test works. # When clear,", "+ name + '.cq' # out_fn = 'test_output/' + name", "= ql.Kernel(\"aKernel\", platform, nqubits, nregs) k.prepz(0) k.gate('rx180', [0]) k.measure(0) rd", "curdir = os.path.dirname(os.path.realpath(__file__)) output_dir = os.path.join(curdir, 'test_output') class Test_bugs(unittest.TestCase): @classmethod", "again. # Now it fails, not clear how to repair,", "p.compile() self.setUpClass() QISA_fn_i = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') os.rename(QISA_fn,QISA_fn_i) for i in", "ql.Program('test_bug', platf, num_qubits) p.set_sweep_points(sweep_points) k = ql.Kernel('kernel1', platf, num_qubits) qubit", "reason. def test_stateful_behavior(self): ql.set_option('optimize', 'no') ql.set_option('scheduler', 'ALAP') platform = ql.Platform(\"myPlatform\",", "[0]) k.measure(0) rd = ql.CReg(0) rs1 = ql.CReg(1) rs2 =", "# case strange errors. So multiple (NCOMPILES) runs of compile", "output_dir = os.path.join(curdir, 'test_output') class Test_bugs(unittest.TestCase): @classmethod def setUp(self): ql.initialize()", "k.measure(0) rd = ql.CReg(0) rs1 = ql.CReg(1) rs2 = ql.CReg(2)", "np from openql import openql as ql from utils import", "# relates to https://github.com/QE-Lab/OpenQL/issues/171 # various runs of compiles were", "be hardcoded now for that reason. def test_stateful_behavior(self): ql.set_option('optimize', 'no')", "ql.set_option('output_dir', output_dir) ql.set_option('use_default_gates', 'yes') ql.set_option('log_level', 'LOG_WARNING') # @unittest.expectedFailure # @unittest.skip", "# depending on stuff like Python's garbage collection to free", "different results or in the best # case strange errors.", "not clear how to repair, so it is disabled. #", "to repair, so it is disabled. # def test_empty_infinite_loop(self): #", "1 num_qubits = 2 platf = ql.Platform(\"starmon\", 'cc_light') p =", "kernel to the program p.add_kernel(k) # relates to https://github.com/QE-Lab/OpenQL/issues/171 #", "= os.path.dirname(os.path.realpath(__file__)) output_dir = os.path.join(curdir, 'test_output') class Test_bugs(unittest.TestCase): @classmethod def", "numpy as np from openql import openql as ql from", "'LOG_WARNING') # @unittest.expectedFailure # @unittest.skip def test_typecast(self): sweep_points = [1,2]", "os.path.join(output_dir, p.name+'_last.qasm') for i in range(NCOMPILES): p.compile() self.setUpClass() QISA_fn_i =", "= 'empty_infinite_loop' # in_fn = 'test_' + name + '.cq'", "= [1,2] num_circuits = 1 num_qubits = 2 platf =", "as ql from utils import file_compare curdir = os.path.dirname(os.path.realpath(__file__)) output_dir", "# various runs of compiles were generating different results or", "errors. 
So multiple (NCOMPILES) runs of compile are executed #", "is same # JvS: more likely, it also had to", "error and output generated in all these runs is same", "'golden/' + name + '_out.cq' # ql.initialize() # #ql.set_option('log_level', 'LOG_DEBUG')", "1 k.identity(np.int(qubit)) k.identity(np.int32(qubit)) k.identity(np.int64(qubit)) k.identity(np.uint(qubit)) k.identity(np.uint32(qubit)) k.identity(np.uint64(qubit)) # add the", "Python's garbage collection to free a register. # The register", "generating different results or in the best # case strange", "numbers have to be hardcoded now for that reason. def", "platform = ql.Platform(\"myPlatform\", 'cc_light') sweep_points = [1] nqubits = 3", "= 1 num_qubits = 2 platf = ql.Platform(\"starmon\", 'cc_light') p", "nqubits = 3 nregs = 3 p = ql.Program(\"statelessProgram\", platform,", "have to be hardcoded now for that reason. def test_stateful_behavior(self):", "= os.path.join(output_dir, p.name+'_last.qasm') for i in range(NCOMPILES): p.compile() self.setUpClass() QISA_fn_i", "as np from openql import openql as ql from utils", "(NCOMPILES) runs of compile are executed # to make sure", "'_out.cq' # ql.initialize() # #ql.set_option('log_level', 'LOG_DEBUG') # ql.compile(in_fn) # self.assertTrue(file_compare(out_fn,", "= 'golden/' + name + '_out.cq' # ql.initialize() # #ql.set_option('log_level',", "various runs of compiles were generating different results or in", "def test_stateful_behavior(self): ql.set_option('optimize', 'no') ql.set_option('scheduler', 'ALAP') platform = ql.Platform(\"myPlatform\", 'cc_light')", "Now it fails, not clear how to repair, so it", "'test_' + name + '.cq' # out_fn = 'test_output/' +", "platform, nqubits, nregs) p.set_sweep_points(sweep_points) k = ql.Kernel(\"aKernel\", platform, nqubits, nregs)", "classical register allocator # depending on stuff like Python's garbage", "NCOMPILES=50 QISA_fn = os.path.join(output_dir, p.name+'_last.qasm') for i in range(NCOMPILES): p.compile()", "os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') os.rename(QISA_fn,QISA_fn_i) for i in range(NCOMPILES-1): QISA_fn_1 = os.path.join(output_dir,", "p.set_sweep_points(sweep_points) k = ql.Kernel(\"aKernel\", platform, nqubits, nregs) k.prepz(0) k.gate('rx180', [0])", "ql.Kernel('kernel1', platf, num_qubits) qubit = 1 k.identity(np.int(qubit)) k.identity(np.int32(qubit)) k.identity(np.int64(qubit)) k.identity(np.uint(qubit))", "that reason. def test_stateful_behavior(self): ql.set_option('optimize', 'no') ql.set_option('scheduler', 'ALAP') platform =", "of compile are executed # to make sure there is", "to https://github.com/QE-Lab/OpenQL/issues/171 # various runs of compiles were generating different", "file_compare(QISA_fn_1, QISA_fn_2)) # Unclear how this test works. # When", "to free a register. 
# The register numbers have to", "platf, num_qubits) qubit = 1 k.identity(np.int(qubit)) k.identity(np.int32(qubit)) k.identity(np.int64(qubit)) k.identity(np.uint(qubit)) k.identity(np.uint32(qubit))", "ql.Program(\"statelessProgram\", platform, nqubits, nregs) p.set_sweep_points(sweep_points) k = ql.Kernel(\"aKernel\", platform, nqubits,", "# #ql.set_option('log_level', 'LOG_DEBUG') # ql.compile(in_fn) # self.assertTrue(file_compare(out_fn, gold_fn)) if __name__", "num_qubits = 2 platf = ql.Platform(\"starmon\", 'cc_light') p = ql.Program('test_bug',", "k.classical(rs1, ql.Operation(4)) k.classical(rd, ql.Operation(rs1, '+', rs2)) p.add_kernel(k) NCOMPILES=50 QISA_fn =", "p.add_kernel(k) # relates to https://github.com/QE-Lab/OpenQL/issues/171 # various runs of compiles", "sure there is no error and output generated in all", "= 3 p = ql.Program(\"statelessProgram\", platform, nqubits, nregs) p.set_sweep_points(sweep_points) k", "now for that reason. def test_stateful_behavior(self): ql.set_option('optimize', 'no') ql.set_option('scheduler', 'ALAP')", "import numpy as np from openql import openql as ql", "= ql.Platform(\"myPlatform\", 'cc_light') sweep_points = [1] nqubits = 3 nregs", "p.set_sweep_points(sweep_points) k = ql.Kernel('kernel1', platf, num_qubits) qubit = 1 k.identity(np.int(qubit))", "JvS: more likely, it also had to do with the", "When clear, enable it again. # Now it fails, not", "ql.initialize() ql.set_option('output_dir', output_dir) ql.set_option('use_default_gates', 'yes') ql.set_option('log_level', 'LOG_WARNING') # @unittest.expectedFailure #", "# Now it fails, not clear how to repair, so", "= 1 k.identity(np.int(qubit)) k.identity(np.int32(qubit)) k.identity(np.int64(qubit)) k.identity(np.uint(qubit)) k.identity(np.uint32(qubit)) k.identity(np.uint64(qubit)) # add", "name + '_out.cq' # ql.initialize() # #ql.set_option('log_level', 'LOG_DEBUG') # ql.compile(in_fn)", "k.identity(np.uint32(qubit)) k.identity(np.uint64(qubit)) # add the kernel to the program p.add_kernel(k)", "to be hardcoded now for that reason. def test_stateful_behavior(self): ql.set_option('optimize',", "had to do with the classical register allocator # depending", "QISA_fn_1 = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') QISA_fn_2 = os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm') self.assertTrue( file_compare(QISA_fn_1,", "# Unclear how this test works. 
# When clear, enable", "os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm') self.assertTrue( file_compare(QISA_fn_1, QISA_fn_2)) # Unclear how this test", "runs of compile are executed # to make sure there", "nqubits, nregs) p.set_sweep_points(sweep_points) k = ql.Kernel(\"aKernel\", platform, nqubits, nregs) k.prepz(0)", "def test_empty_infinite_loop(self): # name = 'empty_infinite_loop' # in_fn = 'test_'", "# The register numbers have to be hardcoded now for", "nregs = 3 p = ql.Program(\"statelessProgram\", platform, nqubits, nregs) p.set_sweep_points(sweep_points)", "ql.Operation(4)) k.classical(rd, ql.Operation(rs1, '+', rs2)) p.add_kernel(k) NCOMPILES=50 QISA_fn = os.path.join(output_dir,", "fails, not clear how to repair, so it is disabled.", "ql from utils import file_compare curdir = os.path.dirname(os.path.realpath(__file__)) output_dir =", "# @unittest.expectedFailure # @unittest.skip def test_typecast(self): sweep_points = [1,2] num_circuits", "ql.CReg(1) rs2 = ql.CReg(2) k.classical(rs1, ql.Operation(3)) k.classical(rs1, ql.Operation(4)) k.classical(rd, ql.Operation(rs1,", "in all these runs is same # JvS: more likely,", "'LOG_DEBUG') # ql.compile(in_fn) # self.assertTrue(file_compare(out_fn, gold_fn)) if __name__ == '__main__':", "self.assertTrue( file_compare(QISA_fn_1, QISA_fn_2)) # Unclear how this test works. #", "QISA_fn_2 = os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm') self.assertTrue( file_compare(QISA_fn_1, QISA_fn_2)) # Unclear how", "rd = ql.CReg(0) rs1 = ql.CReg(1) rs2 = ql.CReg(2) k.classical(rs1,", "in_fn = 'test_' + name + '.cq' # out_fn =", "for i in range(NCOMPILES-1): QISA_fn_1 = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') QISA_fn_2 =", "how this test works. # When clear, enable it again.", "of compiles were generating different results or in the best", "to do with the classical register allocator # depending on", "'_out.cq' # gold_fn = 'golden/' + name + '_out.cq' #", "= 2 platf = ql.Platform(\"starmon\", 'cc_light') p = ql.Program('test_bug', platf,", "k.identity(np.uint64(qubit)) # add the kernel to the program p.add_kernel(k) #", "k.identity(np.int32(qubit)) k.identity(np.int64(qubit)) k.identity(np.uint(qubit)) k.identity(np.uint32(qubit)) k.identity(np.uint64(qubit)) # add the kernel to", "nqubits, nregs) k.prepz(0) k.gate('rx180', [0]) k.measure(0) rd = ql.CReg(0) rs1", "utils import file_compare curdir = os.path.dirname(os.path.realpath(__file__)) output_dir = os.path.join(curdir, 'test_output')", "nregs) k.prepz(0) k.gate('rx180', [0]) k.measure(0) rd = ql.CReg(0) rs1 =", "k.classical(rs1, ql.Operation(3)) k.classical(rs1, ql.Operation(4)) k.classical(rd, ql.Operation(rs1, '+', rs2)) p.add_kernel(k) NCOMPILES=50", "= os.path.join(curdir, 'test_output') class Test_bugs(unittest.TestCase): @classmethod def setUp(self): ql.initialize() ql.set_option('output_dir',", "# name = 'empty_infinite_loop' # in_fn = 'test_' + name", "ql.Platform(\"myPlatform\", 'cc_light') sweep_points = [1] nqubits = 3 nregs =", "p.name+'_'+str(i)+'_last.qasm') os.rename(QISA_fn,QISA_fn_i) for i in range(NCOMPILES-1): QISA_fn_1 = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm')", "depending on stuff like Python's garbage collection to free a", "os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') QISA_fn_2 = os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm') self.assertTrue( file_compare(QISA_fn_1, QISA_fn_2)) #", "k.identity(np.uint(qubit)) k.identity(np.uint32(qubit)) k.identity(np.uint64(qubit)) # add the kernel to the 
program", "is no error and output generated in all these runs", "ql.set_option('optimize', 'no') ql.set_option('scheduler', 'ALAP') platform = ql.Platform(\"myPlatform\", 'cc_light') sweep_points =", "= ql.CReg(2) k.classical(rs1, ql.Operation(3)) k.classical(rs1, ql.Operation(4)) k.classical(rd, ql.Operation(rs1, '+', rs2))", "enable it again. # Now it fails, not clear how", "output generated in all these runs is same # JvS:", "= os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm') self.assertTrue( file_compare(QISA_fn_1, QISA_fn_2)) # Unclear how this", "3 nregs = 3 p = ql.Program(\"statelessProgram\", platform, nqubits, nregs)", "@unittest.skip def test_typecast(self): sweep_points = [1,2] num_circuits = 1 num_qubits", "a register. # The register numbers have to be hardcoded", "'no') ql.set_option('scheduler', 'ALAP') platform = ql.Platform(\"myPlatform\", 'cc_light') sweep_points = [1]", "ql.Operation(rs1, '+', rs2)) p.add_kernel(k) NCOMPILES=50 QISA_fn = os.path.join(output_dir, p.name+'_last.qasm') for", "are executed # to make sure there is no error", "# When clear, enable it again. # Now it fails,", "= ql.Kernel('kernel1', platf, num_qubits) qubit = 1 k.identity(np.int(qubit)) k.identity(np.int32(qubit)) k.identity(np.int64(qubit))", "k.prepz(0) k.gate('rx180', [0]) k.measure(0) rd = ql.CReg(0) rs1 = ql.CReg(1)", "# JvS: more likely, it also had to do with", "#ql.set_option('log_level', 'LOG_DEBUG') # ql.compile(in_fn) # self.assertTrue(file_compare(out_fn, gold_fn)) if __name__ ==", "i in range(NCOMPILES): p.compile() self.setUpClass() QISA_fn_i = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') os.rename(QISA_fn,QISA_fn_i)", "and output generated in all these runs is same #", "output_dir) ql.set_option('use_default_gates', 'yes') ql.set_option('log_level', 'LOG_WARNING') # @unittest.expectedFailure # @unittest.skip def", "name + '_out.cq' # gold_fn = 'golden/' + name +", "in range(NCOMPILES-1): QISA_fn_1 = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') QISA_fn_2 = os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm')", "name = 'empty_infinite_loop' # in_fn = 'test_' + name +", "on stuff like Python's garbage collection to free a register.", "num_circuits = 1 num_qubits = 2 platf = ql.Platform(\"starmon\", 'cc_light')", "in the best # case strange errors. So multiple (NCOMPILES)", "= ql.CReg(1) rs2 = ql.CReg(2) k.classical(rs1, ql.Operation(3)) k.classical(rs1, ql.Operation(4)) k.classical(rd,", "generated in all these runs is same # JvS: more", "runs of compiles were generating different results or in the", "also had to do with the classical register allocator #", "range(NCOMPILES): p.compile() self.setUpClass() QISA_fn_i = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') os.rename(QISA_fn,QISA_fn_i) for i", "class Test_bugs(unittest.TestCase): @classmethod def setUp(self): ql.initialize() ql.set_option('output_dir', output_dir) ql.set_option('use_default_gates', 'yes')", "to make sure there is no error and output generated", "test_empty_infinite_loop(self): # name = 'empty_infinite_loop' # in_fn = 'test_' +", "it again. # Now it fails, not clear how to", "collection to free a register. 
# The register numbers have", "qubit = 1 k.identity(np.int(qubit)) k.identity(np.int32(qubit)) k.identity(np.int64(qubit)) k.identity(np.uint(qubit)) k.identity(np.uint32(qubit)) k.identity(np.uint64(qubit)) #", "'test_output') class Test_bugs(unittest.TestCase): @classmethod def setUp(self): ql.initialize() ql.set_option('output_dir', output_dir) ql.set_option('use_default_gates',", "os import filecmp import unittest import numpy as np from", "os.rename(QISA_fn,QISA_fn_i) for i in range(NCOMPILES-1): QISA_fn_1 = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') QISA_fn_2", "no error and output generated in all these runs is", "disabled. # def test_empty_infinite_loop(self): # name = 'empty_infinite_loop' # in_fn", "filecmp import unittest import numpy as np from openql import", "'+', rs2)) p.add_kernel(k) NCOMPILES=50 QISA_fn = os.path.join(output_dir, p.name+'_last.qasm') for i", "platf, num_qubits) p.set_sweep_points(sweep_points) k = ql.Kernel('kernel1', platf, num_qubits) qubit =", "num_qubits) p.set_sweep_points(sweep_points) k = ql.Kernel('kernel1', platf, num_qubits) qubit = 1", "register. # The register numbers have to be hardcoded now", "this test works. # When clear, enable it again. #", "def setUp(self): ql.initialize() ql.set_option('output_dir', output_dir) ql.set_option('use_default_gates', 'yes') ql.set_option('log_level', 'LOG_WARNING') #", "p = ql.Program('test_bug', platf, num_qubits) p.set_sweep_points(sweep_points) k = ql.Kernel('kernel1', platf,", "clear, enable it again. # Now it fails, not clear", "import openql as ql from utils import file_compare curdir =", "= ql.Platform(\"starmon\", 'cc_light') p = ql.Program('test_bug', platf, num_qubits) p.set_sweep_points(sweep_points) k", "k.classical(rd, ql.Operation(rs1, '+', rs2)) p.add_kernel(k) NCOMPILES=50 QISA_fn = os.path.join(output_dir, p.name+'_last.qasm')", "The register numbers have to be hardcoded now for that", "+ name + '_out.cq' # ql.initialize() # #ql.set_option('log_level', 'LOG_DEBUG') #", "it is disabled. # def test_empty_infinite_loop(self): # name = 'empty_infinite_loop'", "= ql.Program(\"statelessProgram\", platform, nqubits, nregs) p.set_sweep_points(sweep_points) k = ql.Kernel(\"aKernel\", platform,", "gold_fn = 'golden/' + name + '_out.cq' # ql.initialize() #", "add the kernel to the program p.add_kernel(k) # relates to", "it fails, not clear how to repair, so it is", "repair, so it is disabled. 
# def test_empty_infinite_loop(self): # name", "openql as ql from utils import file_compare curdir = os.path.dirname(os.path.realpath(__file__))", "these runs is same # JvS: more likely, it also", "2 platf = ql.Platform(\"starmon\", 'cc_light') p = ql.Program('test_bug', platf, num_qubits)", "ql.set_option('log_level', 'LOG_WARNING') # @unittest.expectedFailure # @unittest.skip def test_typecast(self): sweep_points =", "+ '_out.cq' # gold_fn = 'golden/' + name + '_out.cq'", "from utils import file_compare curdir = os.path.dirname(os.path.realpath(__file__)) output_dir = os.path.join(curdir,", "with the classical register allocator # depending on stuff like", "do with the classical register allocator # depending on stuff", "'.cq' # out_fn = 'test_output/' + name + '_out.cq' #", "= ql.Program('test_bug', platf, num_qubits) p.set_sweep_points(sweep_points) k = ql.Kernel('kernel1', platf, num_qubits)", "# @unittest.skip def test_typecast(self): sweep_points = [1,2] num_circuits = 1", "likely, it also had to do with the classical register", "k.identity(np.int64(qubit)) k.identity(np.uint(qubit)) k.identity(np.uint32(qubit)) k.identity(np.uint64(qubit)) # add the kernel to the", "[1] nqubits = 3 nregs = 3 p = ql.Program(\"statelessProgram\",", "= os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') QISA_fn_2 = os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm') self.assertTrue( file_compare(QISA_fn_1, QISA_fn_2))", "QISA_fn_i = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') os.rename(QISA_fn,QISA_fn_i) for i in range(NCOMPILES-1): QISA_fn_1", "ql.set_option('use_default_gates', 'yes') ql.set_option('log_level', 'LOG_WARNING') # @unittest.expectedFailure # @unittest.skip def test_typecast(self):", "the kernel to the program p.add_kernel(k) # relates to https://github.com/QE-Lab/OpenQL/issues/171", "for i in range(NCOMPILES): p.compile() self.setUpClass() QISA_fn_i = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm')" ]
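# For reference, a minimal sketch of what the file_compare helper imported
# from utils might look like. This is an assumption for illustration only;
# the real helper in the test utils may normalize whitespace or skip header
# lines before comparing.
def file_compare_sketch(fn1, fn2):
    """Return True when the two generated files have identical contents."""
    with open(fn1) as f1, open(fn2) as f2:
        return f1.read() == f2.read()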
[ "= minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) print(\"writing new address: \" + str(ADDRESS2)) sensor.write_register(0,", "Check your connections\" else: print('No sensor on the bus found')", "import argparse import minimalmodbus import serial from time import sleep", "the address. Check your connections\" else: print('No sensor on the", "i in range(1, 248): try: print('Trying address: ' + str(i))", "True def scanModbus(): for i in range(1, 248): try: print('Trying", "minimalmodbus.PARITY=serial.PARITY_NONE minimalmodbus.STOPBITS = 2 minimalmodbus.BAUDRATE=19200 minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True def scanModbus():", "not change the address. Check your connections\" else: print('No sensor", "ADDRESS2 = args.address minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True minimalmodbus.PARITY=serial.PARITY_NONE minimalmodbus.STOPBITS = 2", "parser = argparse.ArgumentParser() parser.add_argument('address', metavar='ADDR', type=int, choices=range(1, 248), help='An address", "return (True, i) except (IOError): print(\"nope...\") pass return (False, 0)", "sleep parser = argparse.ArgumentParser() parser.add_argument('address', metavar='ADDR', type=int, choices=range(1, 248), help='An", "parser.parse_args() ADDRESS1 = 1 ADDRESS2 = args.address minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True", "functioncode=6) sleep(0.2) sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=ADDRESS2) print(\"reading address from holding", "at address: ' + str(i)) try: sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i)", "parser.add_argument('address', metavar='ADDR', type=int, choices=range(1, 248), help='An address to set') args", "sensor at address: ' + str(i)) try: sensor = minimalmodbus.Instrument('/dev/ttyUSB5',", "addressRead = sensor.read_register(0, functioncode=3) if(i == addressRead): print('FOUND!') return (True,", "argparse import minimalmodbus import serial from time import sleep parser", "from time import sleep parser = argparse.ArgumentParser() parser.add_argument('address', metavar='ADDR', type=int,", "serial from time import sleep parser = argparse.ArgumentParser() parser.add_argument('address', metavar='ADDR',", "print \"Could not change the address. 
Check your connections\" else:", "on command line\"\"\" import argparse import minimalmodbus import serial from", "import sleep parser = argparse.ArgumentParser() parser.add_argument('address', metavar='ADDR', type=int, choices=range(1, 248),", "address to the one specified on command line\"\"\" import argparse", "= 1 ADDRESS2 = args.address minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True minimalmodbus.PARITY=serial.PARITY_NONE minimalmodbus.STOPBITS", "minimalmodbus.STOPBITS = 2 minimalmodbus.BAUDRATE=19200 minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True def scanModbus(): for", "for i in range(1, 248): try: print('Trying address: ' +", "to set') args = parser.parse_args() ADDRESS1 = 1 ADDRESS2 =", "\" + str(ADDRESS2)) sensor.write_register(0, value=ADDRESS2, functioncode=6) sleep(0.2) sensor = minimalmodbus.Instrument('/dev/ttyUSB5',", "the bus and changes it's address to the one specified", "print('FOUND!') return (True, i) except (IOError): print(\"nope...\") pass return (False,", "2 minimalmodbus.BAUDRATE=19200 minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True def scanModbus(): for i in", "argparse.ArgumentParser() parser.add_argument('address', metavar='ADDR', type=int, choices=range(1, 248), help='An address to set')", "#!/usr/bin/python \"\"\"Looks for sensor on the bus and changes it's", "change the address. Check your connections\" else: print('No sensor on", "sensor on the bus and changes it's address to the", "= argparse.ArgumentParser() parser.add_argument('address', metavar='ADDR', type=int, choices=range(1, 248), help='An address to", "new address: \" + str(ADDRESS2)) sensor.write_register(0, value=ADDRESS2, functioncode=6) sleep(0.2) sensor", "found: print('Found sensor at address: ' + str(i)) try: sensor", "slaveaddress=i) addressRead = sensor.read_register(0, functioncode=3) if(i == addressRead): print('FOUND!') return", "functioncode=3) if(i == addressRead): print('FOUND!') return (True, i) except (IOError):", "minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=ADDRESS2) print(\"reading address from holding register: \") print(sensor.read_register(0, functioncode=3))", "\"\"\"Looks for sensor on the bus and changes it's address", "minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True def scanModbus(): for i in range(1, 248):", "+ str(i)) sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) addressRead = sensor.read_register(0, functioncode=3)", "it's address to the one specified on command line\"\"\" import", "print(\"nope...\") pass return (False, 0) # sensor.debug=True (found, i) =", "(False, 0) # sensor.debug=True (found, i) = scanModbus() if found:", "if found: print('Found sensor at address: ' + str(i)) try:", "= scanModbus() if found: print('Found sensor at address: ' +", "specified on command line\"\"\" import argparse import minimalmodbus import serial", "(IOError): print(\"nope...\") pass return (False, 0) # sensor.debug=True (found, i)", "\") print(sensor.read_register(0, functioncode=3)) except: print \"Could not change the address.", "functioncode=3)) except: print \"Could not change the address. 
Check your", "sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) addressRead = sensor.read_register(0, functioncode=3) if(i ==", "sensor.read_register(0, functioncode=3) if(i == addressRead): print('FOUND!') return (True, i) except", "range(1, 248): try: print('Trying address: ' + str(i)) sensor =", "sensor.write_register(0, value=ADDRESS2, functioncode=6) sleep(0.2) sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=ADDRESS2) print(\"reading address", "+ str(ADDRESS2)) sensor.write_register(0, value=ADDRESS2, functioncode=6) sleep(0.2) sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=ADDRESS2)", "\"Could not change the address. Check your connections\" else: print('No", "addressRead): print('FOUND!') return (True, i) except (IOError): print(\"nope...\") pass return", "print(\"writing new address: \" + str(ADDRESS2)) sensor.write_register(0, value=ADDRESS2, functioncode=6) sleep(0.2)", "args.address minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True minimalmodbus.PARITY=serial.PARITY_NONE minimalmodbus.STOPBITS = 2 minimalmodbus.BAUDRATE=19200 minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL", "print('Found sensor at address: ' + str(i)) try: sensor =", "help='An address to set') args = parser.parse_args() ADDRESS1 = 1", "= 2 minimalmodbus.BAUDRATE=19200 minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True def scanModbus(): for i", "print(sensor.read_register(0, functioncode=3)) except: print \"Could not change the address. Check", "metavar='ADDR', type=int, choices=range(1, 248), help='An address to set') args =", "= True def scanModbus(): for i in range(1, 248): try:", "i) = scanModbus() if found: print('Found sensor at address: '", "= sensor.read_register(0, functioncode=3) if(i == addressRead): print('FOUND!') return (True, i)", "try: sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) print(\"writing new address: \" +", "sensor.debug=True (found, i) = scanModbus() if found: print('Found sensor at", "address: ' + str(i)) try: sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) print(\"writing", "on the bus and changes it's address to the one", "slaveaddress=i) print(\"writing new address: \" + str(ADDRESS2)) sensor.write_register(0, value=ADDRESS2, functioncode=6)", "address: \" + str(ADDRESS2)) sensor.write_register(0, value=ADDRESS2, functioncode=6) sleep(0.2) sensor =", "= minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=ADDRESS2) print(\"reading address from holding register: \") print(sensor.read_register(0,", "= minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) addressRead = sensor.read_register(0, functioncode=3) if(i == addressRead):", "try: print('Trying address: ' + str(i)) sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i)", "sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) print(\"writing new address: \" + str(ADDRESS2))", "def scanModbus(): for i in range(1, 248): try: print('Trying address:", "0) # sensor.debug=True (found, i) = scanModbus() if found: print('Found", "bus and changes it's address to the one specified on", "line\"\"\" import argparse import minimalmodbus import serial from time import", "minimalmodbus import serial from time import sleep parser = argparse.ArgumentParser()", "args = parser.parse_args() ADDRESS1 = 1 ADDRESS2 = args.address minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL", "minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) print(\"writing new address: \" + str(ADDRESS2)) sensor.write_register(0, value=ADDRESS2,", "+ str(i)) try: sensor = 
minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) print(\"writing new address:", "(found, i) = scanModbus() if found: print('Found sensor at address:", "248), help='An address to set') args = parser.parse_args() ADDRESS1 =", "the one specified on command line\"\"\" import argparse import minimalmodbus", "command line\"\"\" import argparse import minimalmodbus import serial from time", "str(ADDRESS2)) sensor.write_register(0, value=ADDRESS2, functioncode=6) sleep(0.2) sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=ADDRESS2) print(\"reading", "slaveaddress=ADDRESS2) print(\"reading address from holding register: \") print(sensor.read_register(0, functioncode=3)) except:", "choices=range(1, 248), help='An address to set') args = parser.parse_args() ADDRESS1", "type=int, choices=range(1, 248), help='An address to set') args = parser.parse_args()", "ADDRESS1 = 1 ADDRESS2 = args.address minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True minimalmodbus.PARITY=serial.PARITY_NONE", "minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) addressRead = sensor.read_register(0, functioncode=3) if(i == addressRead): print('FOUND!')", "address to set') args = parser.parse_args() ADDRESS1 = 1 ADDRESS2", "import minimalmodbus import serial from time import sleep parser =", "= args.address minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True minimalmodbus.PARITY=serial.PARITY_NONE minimalmodbus.STOPBITS = 2 minimalmodbus.BAUDRATE=19200", "holding register: \") print(sensor.read_register(0, functioncode=3)) except: print \"Could not change", "changes it's address to the one specified on command line\"\"\"", "register: \") print(sensor.read_register(0, functioncode=3)) except: print \"Could not change the", "address. Check your connections\" else: print('No sensor on the bus", "address: ' + str(i)) sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) addressRead =", "True minimalmodbus.PARITY=serial.PARITY_NONE minimalmodbus.STOPBITS = 2 minimalmodbus.BAUDRATE=19200 minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True def", "minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True minimalmodbus.PARITY=serial.PARITY_NONE minimalmodbus.STOPBITS = 2 minimalmodbus.BAUDRATE=19200 minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL =", "except: print \"Could not change the address. 
Check your connections\"", "(True, i) except (IOError): print(\"nope...\") pass return (False, 0) #", "= True minimalmodbus.PARITY=serial.PARITY_NONE minimalmodbus.STOPBITS = 2 minimalmodbus.BAUDRATE=19200 minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True", "if(i == addressRead): print('FOUND!') return (True, i) except (IOError): print(\"nope...\")", "print(\"reading address from holding register: \") print(sensor.read_register(0, functioncode=3)) except: print", "print('Trying address: ' + str(i)) sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) addressRead", "return (False, 0) # sensor.debug=True (found, i) = scanModbus() if", "scanModbus() if found: print('Found sensor at address: ' + str(i))", "value=ADDRESS2, functioncode=6) sleep(0.2) sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=ADDRESS2) print(\"reading address from", "address from holding register: \") print(sensor.read_register(0, functioncode=3)) except: print \"Could", "import serial from time import sleep parser = argparse.ArgumentParser() parser.add_argument('address',", "one specified on command line\"\"\" import argparse import minimalmodbus import", "str(i)) try: sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) print(\"writing new address: \"", "' + str(i)) sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) addressRead = sensor.read_register(0,", "for sensor on the bus and changes it's address to", "1 ADDRESS2 = args.address minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True minimalmodbus.PARITY=serial.PARITY_NONE minimalmodbus.STOPBITS =", "minimalmodbus.BAUDRATE=19200 minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True def scanModbus(): for i in range(1,", "sleep(0.2) sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=ADDRESS2) print(\"reading address from holding register:", "to the one specified on command line\"\"\" import argparse import", "and changes it's address to the one specified on command", "set') args = parser.parse_args() ADDRESS1 = 1 ADDRESS2 = args.address", "== addressRead): print('FOUND!') return (True, i) except (IOError): print(\"nope...\") pass", "except (IOError): print(\"nope...\") pass return (False, 0) # sensor.debug=True (found,", "sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=ADDRESS2) print(\"reading address from holding register: \")", "str(i)) sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) addressRead = sensor.read_register(0, functioncode=3) if(i", "in range(1, 248): try: print('Trying address: ' + str(i)) sensor", "scanModbus(): for i in range(1, 248): try: print('Trying address: '", "248): try: print('Trying address: ' + str(i)) sensor = minimalmodbus.Instrument('/dev/ttyUSB5',", "= parser.parse_args() ADDRESS1 = 1 ADDRESS2 = args.address minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL =", "from holding register: \") print(sensor.read_register(0, functioncode=3)) except: print \"Could not", "# sensor.debug=True (found, i) = scanModbus() if found: print('Found sensor", "i) except (IOError): print(\"nope...\") pass return (False, 0) # sensor.debug=True", "pass return (False, 0) # sensor.debug=True (found, i) = scanModbus()", "' + str(i)) try: sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i) print(\"writing new", "time import sleep parser = argparse.ArgumentParser() parser.add_argument('address', metavar='ADDR', type=int, choices=range(1," ]
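# A small follow-up sketch showing how a reader could verify the new address
# later. The register layout (holding register 0 holds the slave address) and
# the port name are assumptions carried over from the script above; adjust
# them for the actual device and wiring.
def read_back_address(port='/dev/ttyUSB5', addr=42):
    sensor = minimalmodbus.Instrument(port, slaveaddress=addr)
    # Function code 3 reads a holding register, as in the scan above.
    return sensor.read_register(0, functioncode=3)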
[ "1} def save_checkpoint(self, checkpoint_dir): with open(os.path.join(checkpoint_dir, \"test.txt\"), \"wb\") as f:", "save_checkpoint(self, checkpoint_dir): checkpoint_path = os.path.join( checkpoint_dir, \"checkpoint-{}\".format(self._iteration) ) with open(checkpoint_path,", "testTildeAbsolutePath(self): local_dir = \"~/test_tilde_absolute_local_dir\" exp_name = self.prefix + \"TildeAbsolutePath\" absolute_local_dir", "with open(os.path.join(checkpoint_dir, \"test.txt\"), \"rb\") as f: x = pickle.load(f) assert", "this line, test_tune_server.testAddTrial would fail. _register_all() def _get_trial_dir(self, absoulte_exp_dir): print(\"looking", "\"test.txt\"), \"rb\") as f: x = pickle.load(f) assert x ==", "absoulte_exp_dir): print(\"looking for\", self.MockTrainable._name) print(\"in\", os.listdir(absoulte_exp_dir)) trial_dirname = next( (", "open(checkpoint_path, \"rb\") as f: extra_data = pickle.load(f) self.state.update(extra_data) def setUp(self):", "as f: extra_data = pickle.load(f) self.state.update(extra_data) def setUp(self): self.absolute_local_dir =", "absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir, absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testTempfile(self):", "import os import pickle import shutil import tempfile import unittest", "\"CartPole-v0\", \"log_level\": \"DEBUG\"}, ).trials exp_dir = os.path.join(absolute_local_dir, exp_name) _, abs_trial_dir", "checkpoint_dir): with open(os.path.join(checkpoint_dir, \"test.txt\"), \"wb\") as f: pickle.dump(\"test\", f) return", "tune.run( self.MockTrainable, name=exp_name, stop={\"training_iteration\": 1}, checkpoint_freq=1, local_dir=local_dir, config={\"env\": \"CartPole-v0\", \"log_level\":", "= self._get_trial_dir( os.path.join(absolute_local_dir, exp_name) ) checkpoint_path = os.path.join( local_dir, exp_name,", "checkpoint_path def load_checkpoint(self, checkpoint_path): with open(checkpoint_path, \"rb\") as f: extra_data", "pickle import shutil import tempfile import unittest import ray from", "from ray.tune.utils import validate_save_restore class SerialTuneRelativeLocalDirTest(unittest.TestCase): local_mode = True prefix", ") ) trial_absolute_dir = os.path.join(absoulte_exp_dir, trial_dirname) return trial_dirname, trial_absolute_dir def", "pickle.load(f) self.state.update(extra_data) def setUp(self): self.absolute_local_dir = None ray.init(num_cpus=1, num_gpus=0, local_mode=self.local_mode)", "# The file tune would find. 
The absolute checkpoint path.", "local_dir, absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testTildeAbsolutePath(self): local_dir = \"~/test_tilde_absolute_local_dir\"", "self._train(exp_name, local_dir, absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testTempfile(self): local_dir =", ").trials exp_dir = os.path.join(absolute_local_dir, exp_name) _, abs_trial_dir = self._get_trial_dir(exp_dir) self.assertIsNone(trial.error_file)", "self.assertTrue(os.path.isdir(abs_trial_dir)) self.assertTrue( os.path.isfile( os.path.join(abs_trial_dir, \"checkpoint_000001/checkpoint-1\") ) ) def _restore(self, exp_name,", "\"CartPole-v0\", \"log_level\": \"DEBUG\"}, ).trials self.assertIsNone(trial.error_file) def testDottedRelativePath(self): local_dir = \"./test_dotted_relative_local_dir\"", "1}, checkpoint_freq=1, local_dir=local_dir, config={\"env\": \"CartPole-v0\", \"log_level\": \"DEBUG\"}, ).trials exp_dir =", "(trial,) = tune.run( self.MockTrainable, name=exp_name, stop={\"training_iteration\": 2}, # train one", "self._restore(exp_name, local_dir, local_dir) def testCheckpointWithNoop(self): \"\"\"Tests that passing the checkpoint_dir", "num_gpus=0, local_mode=self.local_mode) def tearDown(self): if self.absolute_local_dir is not None: shutil.rmtree(self.absolute_local_dir,", "self.MockTrainable, name=exp_name, stop={\"training_iteration\": 1}, checkpoint_freq=1, local_dir=local_dir, config={\"env\": \"CartPole-v0\", \"log_level\": \"DEBUG\"},", "with open(checkpoint_path, \"rb\") as f: extra_data = pickle.load(f) self.state.update(extra_data) def", "stop={\"training_iteration\": 2}, # train one more iteration. restore=checkpoint_path, # Restore", "with open(os.path.join(checkpoint_dir, \"test.txt\"), \"wb\") as f: pickle.dump(\"test\", f) return checkpoint_dir", "= os.path.join( checkpoint_dir, \"checkpoint-{}\".format(self._iteration) ) with open(checkpoint_path, \"wb\") as f:", "from ray.tune import Trainable from ray.tune.utils import validate_save_restore class SerialTuneRelativeLocalDirTest(unittest.TestCase):", "\"test_relative_local_dir\" exp_name = self.prefix + \"RelativePath\" absolute_local_dir = os.path.abspath(local_dir) self.absolute_local_dir", "def step(self): return {\"timesteps_this_iter\": 1, \"done\": True} def save_checkpoint(self, checkpoint_dir):", "def testCheckpointWithNoop(self): \"\"\"Tests that passing the checkpoint_dir right back works.\"\"\"", "local_dir) def testCheckpointWithNoop(self): \"\"\"Tests that passing the checkpoint_dir right back", "The file tune would find. The absolute checkpoint path. 
tune_find_file", "\"rb\") as f: extra_data = pickle.load(f) self.state.update(extra_data) def setUp(self): self.absolute_local_dir", "def load_checkpoint(self, checkpoint_path): with open(checkpoint_path, \"rb\") as f: extra_data =", "def setup(self, config): self.state = {\"hi\": 1} def step(self): return", "{\"timesteps_this_iter\": 1, \"done\": True} def save_checkpoint(self, checkpoint_dir): checkpoint_path = os.path.join(", "config={\"env\": \"CartPole-v0\", \"log_level\": \"DEBUG\"}, ).trials self.assertIsNone(trial.error_file) def testDottedRelativePath(self): local_dir =", "True prefix = \"Serial\" class MockTrainable(Trainable): _name = \"MockTrainable\" def", "load_checkpoint(self, checkpoint_dir): with open(os.path.join(checkpoint_dir, \"test.txt\"), \"rb\") as f: x =", "self.assertEqual(trial.logdir, abs_trial_dir) self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir) self.assertTrue(os.path.isdir(exp_dir)) self.assertTrue(os.path.isdir(abs_trial_dir)) self.assertTrue( os.path.isfile( os.path.join(abs_trial_dir, \"checkpoint_000001/checkpoint-1\")", "return trial_dirname, trial_absolute_dir def _train(self, exp_name, local_dir, absolute_local_dir): (trial,) =", "2}, # train one more iteration. restore=checkpoint_path, # Restore the", "abs_trial_dir) self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir) self.assertTrue(os.path.isdir(exp_dir)) self.assertTrue(os.path.isdir(abs_trial_dir)) self.assertTrue( os.path.isfile( os.path.join(abs_trial_dir, \"checkpoint_000001/checkpoint-1\") )", "True} def save_checkpoint(self, checkpoint_dir): checkpoint_path = os.path.join( checkpoint_dir, \"checkpoint-{}\".format(self._iteration) )", "tune would find. The absolute checkpoint path. tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path))", "x == \"test\" return checkpoint_dir validate_save_restore(MockTrainable) validate_save_restore(MockTrainable, use_object_store=True) if __name__", "= \"test_relative_local_dir\" exp_name = self.prefix + \"RelativePath\" absolute_local_dir = os.path.abspath(local_dir)", "as f: pickle.dump(self.state, f) return checkpoint_path def load_checkpoint(self, checkpoint_path): with", "exp_name = self.prefix + \"TildeAbsolutePath\" absolute_local_dir = os.path.abspath(os.path.expanduser(local_dir)) self.absolute_local_dir =", "test_tune_server.testAddTrial would fail. _register_all() def _get_trial_dir(self, absoulte_exp_dir): print(\"looking for\", self.MockTrainable._name)", "self._get_trial_dir(exp_dir) self.assertIsNone(trial.error_file) self.assertEqual(trial.local_dir, exp_dir) self.assertEqual(trial.logdir, abs_trial_dir) self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir) self.assertTrue(os.path.isdir(exp_dir)) self.assertTrue(os.path.isdir(abs_trial_dir))", "local_dir, absolute_local_dir): trial_name, abs_trial_dir = self._get_trial_dir( os.path.join(absolute_local_dir, exp_name) ) checkpoint_path", "self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir, absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testRelativePath(self): local_dir", ") ) ) trial_absolute_dir = os.path.join(absoulte_exp_dir, trial_dirname) return trial_dirname, trial_absolute_dir", "one more iteration. 
restore=checkpoint_path, # Restore the checkpoint config={\"env\": \"CartPole-v0\",", "checkpoint_path): with open(checkpoint_path, \"rb\") as f: extra_data = pickle.load(f) self.state.update(extra_data)", "\"test.txt\"), \"wb\") as f: pickle.dump(\"test\", f) return checkpoint_dir def load_checkpoint(self,", "{\"score\": 1} def save_checkpoint(self, checkpoint_dir): with open(os.path.join(checkpoint_dir, \"test.txt\"), \"wb\") as", "self.assertIsNone(trial.error_file) self.assertEqual(trial.local_dir, exp_dir) self.assertEqual(trial.logdir, abs_trial_dir) self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir) self.assertTrue(os.path.isdir(exp_dir)) self.assertTrue(os.path.isdir(abs_trial_dir)) self.assertTrue(", "the checkpoint_dir right back works.\"\"\" class MockTrainable(Trainable): def setup(self, config):", "import shutil import tempfile import unittest import ray from ray", "# Relative checkpoint path # The file tune would find.", "checkpoint_dir): checkpoint_path = os.path.join( checkpoint_dir, \"checkpoint-{}\".format(self._iteration) ) with open(checkpoint_path, \"wb\")", "= os.path.join( local_dir, exp_name, trial_name, \"checkpoint_000001/checkpoint-1\" ) # Relative checkpoint", "import pickle import shutil import tempfile import unittest import ray", "if ( os.path.isdir(os.path.join(absoulte_exp_dir, child_dir)) and child_dir.startswith(self.MockTrainable._name) ) ) ) trial_absolute_dir", "setup(self, config): pass def step(self): return {\"score\": 1} def save_checkpoint(self,", "self._restore(exp_name, local_dir, absolute_local_dir) def testTildeAbsolutePath(self): local_dir = \"~/test_tilde_absolute_local_dir\" exp_name =", "next( ( child_dir for child_dir in os.listdir(absoulte_exp_dir) if ( os.path.isdir(os.path.join(absoulte_exp_dir,", "setup(self, config): self.state = {\"hi\": 1} def step(self): return {\"timesteps_this_iter\":", "None: shutil.rmtree(self.absolute_local_dir, ignore_errors=True) self.absolute_local_dir = None ray.shutdown() # Without this", "self.MockTrainable, name=exp_name, stop={\"training_iteration\": 2}, # train one more iteration. 
restore=checkpoint_path,", "def step(self): return {\"score\": 1} def save_checkpoint(self, checkpoint_dir): with open(os.path.join(checkpoint_dir,", "= next( ( child_dir for child_dir in os.listdir(absoulte_exp_dir) if (", "passing the checkpoint_dir right back works.\"\"\" class MockTrainable(Trainable): def setup(self,", "\"\"\"Tests that passing the checkpoint_dir right back works.\"\"\" class MockTrainable(Trainable):", "self.absolute_local_dir = absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir, absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir)", "= \"~/test_tilde_absolute_local_dir\" exp_name = self.prefix + \"TildeAbsolutePath\" absolute_local_dir = os.path.abspath(os.path.expanduser(local_dir))", "absolute_local_dir = os.path.abspath(local_dir) self.absolute_local_dir = absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir, absolute_local_dir)", "step(self): return {\"timesteps_this_iter\": 1, \"done\": True} def save_checkpoint(self, checkpoint_dir): checkpoint_path", "works.\"\"\" class MockTrainable(Trainable): def setup(self, config): pass def step(self): return", "return checkpoint_dir validate_save_restore(MockTrainable) validate_save_restore(MockTrainable, use_object_store=True) if __name__ == \"__main__\": import", "( os.path.isdir(os.path.join(absoulte_exp_dir, child_dir)) and child_dir.startswith(self.MockTrainable._name) ) ) ) trial_absolute_dir =", "_get_trial_dir(self, absoulte_exp_dir): print(\"looking for\", self.MockTrainable._name) print(\"in\", os.listdir(absoulte_exp_dir)) trial_dirname = next(", "checkpoint path. tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path)) self.assertTrue( os.path.isfile(tune_find_file), \"{} is not", "absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testRelativePath(self): local_dir = \"test_relative_local_dir\" exp_name", "+ \"RelativePath\" absolute_local_dir = os.path.abspath(local_dir) self.absolute_local_dir = absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name,", "open(checkpoint_path, \"wb\") as f: pickle.dump(self.state, f) return checkpoint_path def load_checkpoint(self,", "def setup(self, config): pass def step(self): return {\"score\": 1} def", "train one more iteration. restore=checkpoint_path, # Restore the checkpoint config={\"env\":", "path. 
tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path)) self.assertTrue( os.path.isfile(tune_find_file), \"{} is not exist!\".format(tune_find_file)", "self._restore(exp_name, local_dir, absolute_local_dir) def testTempfile(self): local_dir = tempfile.mkdtemp() exp_name =", "checkpoint_path = os.path.join( checkpoint_dir, \"checkpoint-{}\".format(self._iteration) ) with open(checkpoint_path, \"wb\") as", "ray.rllib import _register_all from ray.tune import Trainable from ray.tune.utils import", "local_dir = \"./test_dotted_relative_local_dir\" exp_name = self.prefix + \"DottedRelativeLocalDir\" absolute_local_dir =", "load_checkpoint(self, checkpoint_path): with open(checkpoint_path, \"rb\") as f: extra_data = pickle.load(f)", "os.path.join(absoulte_exp_dir, trial_dirname) return trial_dirname, trial_absolute_dir def _train(self, exp_name, local_dir, absolute_local_dir):", "\"RelativePath\" absolute_local_dir = os.path.abspath(local_dir) self.absolute_local_dir = absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir,", "= self.prefix + \"Tempfile\" self.absolute_local_dir = local_dir self._train(exp_name, local_dir, local_dir)", "= None ray.init(num_cpus=1, num_gpus=0, local_mode=self.local_mode) def tearDown(self): if self.absolute_local_dir is", "config={\"env\": \"CartPole-v0\", \"log_level\": \"DEBUG\"}, ).trials exp_dir = os.path.join(absolute_local_dir, exp_name) _,", "= \"./test_dotted_relative_local_dir\" exp_name = self.prefix + \"DottedRelativeLocalDir\" absolute_local_dir = os.path.abspath(local_dir)", "os import pickle import shutil import tempfile import unittest import", "= os.path.abspath(os.path.expanduser(checkpoint_path)) self.assertTrue( os.path.isfile(tune_find_file), \"{} is not exist!\".format(tune_find_file) ) (trial,)", "local_dir, absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testTempfile(self): local_dir = tempfile.mkdtemp()", "os.path.isfile( os.path.join(abs_trial_dir, \"checkpoint_000001/checkpoint-1\") ) ) def _restore(self, exp_name, local_dir, absolute_local_dir):", "self.absolute_local_dir = None ray.init(num_cpus=1, num_gpus=0, local_mode=self.local_mode) def tearDown(self): if self.absolute_local_dir", "self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir, absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testTildeAbsolutePath(self): local_dir", "def _train(self, exp_name, local_dir, absolute_local_dir): (trial,) = tune.run( self.MockTrainable, name=exp_name,", "name=exp_name, stop={\"training_iteration\": 2}, # train one more iteration. restore=checkpoint_path, #", "save_checkpoint(self, checkpoint_dir): with open(os.path.join(checkpoint_dir, \"test.txt\"), \"wb\") as f: pickle.dump(\"test\", f)", "= os.path.join(absoulte_exp_dir, trial_dirname) return trial_dirname, trial_absolute_dir def _train(self, exp_name, local_dir,", "\"{} is not exist!\".format(tune_find_file) ) (trial,) = tune.run( self.MockTrainable, name=exp_name,", "open(os.path.join(checkpoint_dir, \"test.txt\"), \"wb\") as f: pickle.dump(\"test\", f) return checkpoint_dir def", "path # The file tune would find. The absolute checkpoint", "self.state = {\"hi\": 1} def step(self): return {\"timesteps_this_iter\": 1, \"done\":", "self.assertIsNone(trial.error_file) def testDottedRelativePath(self): local_dir = \"./test_dotted_relative_local_dir\" exp_name = self.prefix +", "find. The absolute checkpoint path. 
tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path)) self.assertTrue( os.path.isfile(tune_find_file),", "checkpoint config={\"env\": \"CartPole-v0\", \"log_level\": \"DEBUG\"}, ).trials self.assertIsNone(trial.error_file) def testDottedRelativePath(self): local_dir", "= None ray.shutdown() # Without this line, test_tune_server.testAddTrial would fail.", "exp_name = self.prefix + \"RelativePath\" absolute_local_dir = os.path.abspath(local_dir) self.absolute_local_dir =", "= tune.run( self.MockTrainable, name=exp_name, stop={\"training_iteration\": 1}, checkpoint_freq=1, local_dir=local_dir, config={\"env\": \"CartPole-v0\",", "self._train(exp_name, local_dir, absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testRelativePath(self): local_dir =", "= local_dir self._train(exp_name, local_dir, local_dir) self._restore(exp_name, local_dir, local_dir) def testCheckpointWithNoop(self):", "import tempfile import unittest import ray from ray import tune", "def testTempfile(self): local_dir = tempfile.mkdtemp() exp_name = self.prefix + \"Tempfile\"", "name=exp_name, stop={\"training_iteration\": 1}, checkpoint_freq=1, local_dir=local_dir, config={\"env\": \"CartPole-v0\", \"log_level\": \"DEBUG\"}, ).trials", "tempfile import unittest import ray from ray import tune from", "# coding: utf-8 import os import pickle import shutil import", "+ \"DottedRelativeLocalDir\" absolute_local_dir = os.path.abspath(local_dir) self.absolute_local_dir = absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name,", "exp_dir = os.path.join(absolute_local_dir, exp_name) _, abs_trial_dir = self._get_trial_dir(exp_dir) self.assertIsNone(trial.error_file) self.assertEqual(trial.local_dir,", "iteration. restore=checkpoint_path, # Restore the checkpoint config={\"env\": \"CartPole-v0\", \"log_level\": \"DEBUG\"},", "exist!\".format(tune_find_file) ) (trial,) = tune.run( self.MockTrainable, name=exp_name, stop={\"training_iteration\": 2}, #", "validate_save_restore class SerialTuneRelativeLocalDirTest(unittest.TestCase): local_mode = True prefix = \"Serial\" class", "local_dir, local_dir) self._restore(exp_name, local_dir, local_dir) def testCheckpointWithNoop(self): \"\"\"Tests that passing", "back works.\"\"\" class MockTrainable(Trainable): def setup(self, config): pass def step(self):", "self.assertTrue( os.path.isfile(tune_find_file), \"{} is not exist!\".format(tune_find_file) ) (trial,) = tune.run(", "would fail. 
_register_all() def _get_trial_dir(self, absoulte_exp_dir): print(\"looking for\", self.MockTrainable._name) print(\"in\",", "local_dir, absolute_local_dir): (trial,) = tune.run( self.MockTrainable, name=exp_name, stop={\"training_iteration\": 1}, checkpoint_freq=1,", "os.path.isfile(tune_find_file), \"{} is not exist!\".format(tune_find_file) ) (trial,) = tune.run( self.MockTrainable,", "validate_save_restore(MockTrainable) validate_save_restore(MockTrainable, use_object_store=True) if __name__ == \"__main__\": import pytest import", "trial_dirname = next( ( child_dir for child_dir in os.listdir(absoulte_exp_dir) if", "= tempfile.mkdtemp() exp_name = self.prefix + \"Tempfile\" self.absolute_local_dir = local_dir", "that passing the checkpoint_dir right back works.\"\"\" class MockTrainable(Trainable): def", "f) return checkpoint_dir def load_checkpoint(self, checkpoint_dir): with open(os.path.join(checkpoint_dir, \"test.txt\"), \"rb\")", "def _get_trial_dir(self, absoulte_exp_dir): print(\"looking for\", self.MockTrainable._name) print(\"in\", os.listdir(absoulte_exp_dir)) trial_dirname =", "pass def step(self): return {\"score\": 1} def save_checkpoint(self, checkpoint_dir): with", "print(\"looking for\", self.MockTrainable._name) print(\"in\", os.listdir(absoulte_exp_dir)) trial_dirname = next( ( child_dir", "tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path)) self.assertTrue( os.path.isfile(tune_find_file), \"{} is not exist!\".format(tune_find_file) )", "unittest import ray from ray import tune from ray.rllib import", "would find. The absolute checkpoint path. tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path)) self.assertTrue(", "right back works.\"\"\" class MockTrainable(Trainable): def setup(self, config): pass def", "exp_dir) self.assertEqual(trial.logdir, abs_trial_dir) self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir) self.assertTrue(os.path.isdir(exp_dir)) self.assertTrue(os.path.isdir(abs_trial_dir)) self.assertTrue( os.path.isfile( os.path.join(abs_trial_dir,", "== \"test\" return checkpoint_dir validate_save_restore(MockTrainable) validate_save_restore(MockTrainable, use_object_store=True) if __name__ ==", "\"DottedRelativeLocalDir\" absolute_local_dir = os.path.abspath(local_dir) self.absolute_local_dir = absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir,", "from ray.rllib import _register_all from ray.tune import Trainable from ray.tune.utils", "self._train(exp_name, local_dir, local_dir) self._restore(exp_name, local_dir, local_dir) def testCheckpointWithNoop(self): \"\"\"Tests that", "tune from ray.rllib import _register_all from ray.tune import Trainable from", "f: x = pickle.load(f) assert x == \"test\" return checkpoint_dir", "import unittest import ray from ray import tune from ray.rllib", "(trial,) = tune.run( self.MockTrainable, name=exp_name, stop={\"training_iteration\": 1}, checkpoint_freq=1, local_dir=local_dir, config={\"env\":", ") (trial,) = tune.run( self.MockTrainable, name=exp_name, stop={\"training_iteration\": 2}, # train", "if __name__ == \"__main__\": import pytest import sys sys.exit(pytest.main([\"-v\", __file__]))", "local_dir, absolute_local_dir) def testTempfile(self): local_dir = tempfile.mkdtemp() exp_name = self.prefix", "self.MockTrainable._name) print(\"in\", os.listdir(absoulte_exp_dir)) trial_dirname = next( ( child_dir for child_dir", "print(\"in\", os.listdir(absoulte_exp_dir)) trial_dirname = next( ( child_dir for child_dir 
in", "= os.path.join(absolute_local_dir, exp_name) _, abs_trial_dir = self._get_trial_dir(exp_dir) self.assertIsNone(trial.error_file) self.assertEqual(trial.local_dir, exp_dir)", "exp_name, local_dir, absolute_local_dir): (trial,) = tune.run( self.MockTrainable, name=exp_name, stop={\"training_iteration\": 1},", "MockTrainable(Trainable): _name = \"MockTrainable\" def setup(self, config): self.state = {\"hi\":", "import tune from ray.rllib import _register_all from ray.tune import Trainable", "import _register_all from ray.tune import Trainable from ray.tune.utils import validate_save_restore", "exp_name = self.prefix + \"DottedRelativeLocalDir\" absolute_local_dir = os.path.abspath(local_dir) self.absolute_local_dir =", "absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testTildeAbsolutePath(self): local_dir = \"~/test_tilde_absolute_local_dir\" exp_name", "the checkpoint config={\"env\": \"CartPole-v0\", \"log_level\": \"DEBUG\"}, ).trials self.assertIsNone(trial.error_file) def testDottedRelativePath(self):", "more iteration. restore=checkpoint_path, # Restore the checkpoint config={\"env\": \"CartPole-v0\", \"log_level\":", "\"MockTrainable\" def setup(self, config): self.state = {\"hi\": 1} def step(self):", "x = pickle.load(f) assert x == \"test\" return checkpoint_dir validate_save_restore(MockTrainable)", "local_mode=self.local_mode) def tearDown(self): if self.absolute_local_dir is not None: shutil.rmtree(self.absolute_local_dir, ignore_errors=True)", "import validate_save_restore class SerialTuneRelativeLocalDirTest(unittest.TestCase): local_mode = True prefix = \"Serial\"", "\"test\" return checkpoint_dir validate_save_restore(MockTrainable) validate_save_restore(MockTrainable, use_object_store=True) if __name__ == \"__main__\":", "absolute_local_dir) def testRelativePath(self): local_dir = \"test_relative_local_dir\" exp_name = self.prefix +", "\"Serial\" class MockTrainable(Trainable): _name = \"MockTrainable\" def setup(self, config): self.state", "( child_dir for child_dir in os.listdir(absoulte_exp_dir) if ( os.path.isdir(os.path.join(absoulte_exp_dir, child_dir))", "trial_dirname) return trial_dirname, trial_absolute_dir def _train(self, exp_name, local_dir, absolute_local_dir): (trial,)", "absolute_local_dir): trial_name, abs_trial_dir = self._get_trial_dir( os.path.join(absolute_local_dir, exp_name) ) checkpoint_path =", "checkpoint_dir, \"checkpoint-{}\".format(self._iteration) ) with open(checkpoint_path, \"wb\") as f: pickle.dump(self.state, f)", "is not None: shutil.rmtree(self.absolute_local_dir, ignore_errors=True) self.absolute_local_dir = None ray.shutdown() #", "def testRelativePath(self): local_dir = \"test_relative_local_dir\" exp_name = self.prefix + \"RelativePath\"", "f: pickle.dump(self.state, f) return checkpoint_path def load_checkpoint(self, checkpoint_path): with open(checkpoint_path,", "\"checkpoint_000001/checkpoint-1\" ) # Relative checkpoint path # The file tune", "child_dir in os.listdir(absoulte_exp_dir) if ( os.path.isdir(os.path.join(absoulte_exp_dir, child_dir)) and child_dir.startswith(self.MockTrainable._name) )", "os.listdir(absoulte_exp_dir)) trial_dirname = next( ( child_dir for child_dir in os.listdir(absoulte_exp_dir)", "checkpoint_path = os.path.join( local_dir, exp_name, trial_name, \"checkpoint_000001/checkpoint-1\" ) # Relative", "local_dir, exp_name, trial_name, \"checkpoint_000001/checkpoint-1\" ) # Relative checkpoint path #", "\"checkpoint-{}\".format(self._iteration) ) with open(checkpoint_path, 
\"wb\") as f: pickle.dump(self.state, f) return", "file tune would find. The absolute checkpoint path. tune_find_file =", "self.prefix + \"DottedRelativeLocalDir\" absolute_local_dir = os.path.abspath(local_dir) self.absolute_local_dir = absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir))", "class MockTrainable(Trainable): def setup(self, config): pass def step(self): return {\"score\":", "local_dir self._train(exp_name, local_dir, local_dir) self._restore(exp_name, local_dir, local_dir) def testCheckpointWithNoop(self): \"\"\"Tests", "child_dir)) and child_dir.startswith(self.MockTrainable._name) ) ) ) trial_absolute_dir = os.path.join(absoulte_exp_dir, trial_dirname)", "MockTrainable(Trainable): def setup(self, config): pass def step(self): return {\"score\": 1}", "assert x == \"test\" return checkpoint_dir validate_save_restore(MockTrainable) validate_save_restore(MockTrainable, use_object_store=True) if", "stop={\"training_iteration\": 1}, checkpoint_freq=1, local_dir=local_dir, config={\"env\": \"CartPole-v0\", \"log_level\": \"DEBUG\"}, ).trials exp_dir", "local_dir = \"~/test_tilde_absolute_local_dir\" exp_name = self.prefix + \"TildeAbsolutePath\" absolute_local_dir =", "pickle.dump(self.state, f) return checkpoint_path def load_checkpoint(self, checkpoint_path): with open(checkpoint_path, \"rb\")", "# Restore the checkpoint config={\"env\": \"CartPole-v0\", \"log_level\": \"DEBUG\"}, ).trials self.assertIsNone(trial.error_file)", "The absolute checkpoint path. tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path)) self.assertTrue( os.path.isfile(tune_find_file), \"{}", "def testDottedRelativePath(self): local_dir = \"./test_dotted_relative_local_dir\" exp_name = self.prefix + \"DottedRelativeLocalDir\"", "os.path.join(absolute_local_dir, exp_name) _, abs_trial_dir = self._get_trial_dir(exp_dir) self.assertIsNone(trial.error_file) self.assertEqual(trial.local_dir, exp_dir) self.assertEqual(trial.logdir,", ") def _restore(self, exp_name, local_dir, absolute_local_dir): trial_name, abs_trial_dir = self._get_trial_dir(", "\"~/test_tilde_absolute_local_dir\" exp_name = self.prefix + \"TildeAbsolutePath\" absolute_local_dir = os.path.abspath(os.path.expanduser(local_dir)) self.absolute_local_dir", "= tune.run( self.MockTrainable, name=exp_name, stop={\"training_iteration\": 2}, # train one more", "= absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir, absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def", "self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir, absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testTempfile(self): local_dir", "abs_trial_dir = self._get_trial_dir( os.path.join(absolute_local_dir, exp_name) ) checkpoint_path = os.path.join( local_dir,", "+ \"Tempfile\" self.absolute_local_dir = local_dir self._train(exp_name, local_dir, local_dir) self._restore(exp_name, local_dir,", "step(self): return {\"score\": 1} def save_checkpoint(self, checkpoint_dir): with open(os.path.join(checkpoint_dir, \"test.txt\"),", "as f: pickle.dump(\"test\", f) return checkpoint_dir def load_checkpoint(self, checkpoint_dir): with", "\"./test_dotted_relative_local_dir\" exp_name = self.prefix + \"DottedRelativeLocalDir\" absolute_local_dir = os.path.abspath(local_dir) self.absolute_local_dir", "os.path.isdir(os.path.join(absoulte_exp_dir, child_dir)) and child_dir.startswith(self.MockTrainable._name) ) ) ) 
trial_absolute_dir = os.path.join(absoulte_exp_dir,", "\"TildeAbsolutePath\" absolute_local_dir = os.path.abspath(os.path.expanduser(local_dir)) self.absolute_local_dir = absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir,", "_register_all from ray.tune import Trainable from ray.tune.utils import validate_save_restore class", "{\"hi\": 1} def step(self): return {\"timesteps_this_iter\": 1, \"done\": True} def", "\"rb\") as f: x = pickle.load(f) assert x == \"test\"", "self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir) self.assertTrue(os.path.isdir(exp_dir)) self.assertTrue(os.path.isdir(abs_trial_dir)) self.assertTrue( os.path.isfile( os.path.join(abs_trial_dir, \"checkpoint_000001/checkpoint-1\") ) )", "return checkpoint_path def load_checkpoint(self, checkpoint_path): with open(checkpoint_path, \"rb\") as f:", "exp_name) _, abs_trial_dir = self._get_trial_dir(exp_dir) self.assertIsNone(trial.error_file) self.assertEqual(trial.local_dir, exp_dir) self.assertEqual(trial.logdir, abs_trial_dir)", "\"Tempfile\" self.absolute_local_dir = local_dir self._train(exp_name, local_dir, local_dir) self._restore(exp_name, local_dir, local_dir)", "absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir, absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testTildeAbsolutePath(self):", "checkpoint_dir validate_save_restore(MockTrainable) validate_save_restore(MockTrainable, use_object_store=True) if __name__ == \"__main__\": import pytest", "local_dir, absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testRelativePath(self): local_dir = \"test_relative_local_dir\"", "coding: utf-8 import os import pickle import shutil import tempfile", "Relative checkpoint path # The file tune would find. The", "os.listdir(absoulte_exp_dir) if ( os.path.isdir(os.path.join(absoulte_exp_dir, child_dir)) and child_dir.startswith(self.MockTrainable._name) ) ) )", "absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testTempfile(self): local_dir = tempfile.mkdtemp() exp_name", "shutil.rmtree(self.absolute_local_dir, ignore_errors=True) self.absolute_local_dir = None ray.shutdown() # Without this line,", "class SerialTuneRelativeLocalDirTest(unittest.TestCase): local_mode = True prefix = \"Serial\" class MockTrainable(Trainable):", "extra_data = pickle.load(f) self.state.update(extra_data) def setUp(self): self.absolute_local_dir = None ray.init(num_cpus=1,", "# Without this line, test_tune_server.testAddTrial would fail. 
_register_all() def _get_trial_dir(self,", ") checkpoint_path = os.path.join( local_dir, exp_name, trial_name, \"checkpoint_000001/checkpoint-1\" ) #", "self.assertEqual(trial.local_dir, exp_dir) self.assertEqual(trial.logdir, abs_trial_dir) self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir) self.assertTrue(os.path.isdir(exp_dir)) self.assertTrue(os.path.isdir(abs_trial_dir)) self.assertTrue( os.path.isfile(", "f: pickle.dump(\"test\", f) return checkpoint_dir def load_checkpoint(self, checkpoint_dir): with open(os.path.join(checkpoint_dir,", "pickle.dump(\"test\", f) return checkpoint_dir def load_checkpoint(self, checkpoint_dir): with open(os.path.join(checkpoint_dir, \"test.txt\"),", "_, abs_trial_dir = self._get_trial_dir(exp_dir) self.assertIsNone(trial.error_file) self.assertEqual(trial.local_dir, exp_dir) self.assertEqual(trial.logdir, abs_trial_dir) self.assertTrue(os.path.isdir(absolute_local_dir),", "os.path.abspath(local_dir) self.absolute_local_dir = absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir, absolute_local_dir) self._restore(exp_name, local_dir,", "restore=checkpoint_path, # Restore the checkpoint config={\"env\": \"CartPole-v0\", \"log_level\": \"DEBUG\"}, ).trials", "absolute_local_dir) def testTildeAbsolutePath(self): local_dir = \"~/test_tilde_absolute_local_dir\" exp_name = self.prefix +", "self.prefix + \"RelativePath\" absolute_local_dir = os.path.abspath(local_dir) self.absolute_local_dir = absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir))", "absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir, absolute_local_dir) self._restore(exp_name, local_dir, absolute_local_dir) def testRelativePath(self):", "local_dir=local_dir, config={\"env\": \"CartPole-v0\", \"log_level\": \"DEBUG\"}, ).trials exp_dir = os.path.join(absolute_local_dir, exp_name)", "prefix = \"Serial\" class MockTrainable(Trainable): _name = \"MockTrainable\" def setup(self,", "absolute_local_dir = os.path.abspath(os.path.expanduser(local_dir)) self.absolute_local_dir = absolute_local_dir self.assertFalse(os.path.exists(absolute_local_dir)) self._train(exp_name, local_dir, absolute_local_dir)", "return checkpoint_dir def load_checkpoint(self, checkpoint_dir): with open(os.path.join(checkpoint_dir, \"test.txt\"), \"rb\") as", "class MockTrainable(Trainable): _name = \"MockTrainable\" def setup(self, config): self.state =", "validate_save_restore(MockTrainable, use_object_store=True) if __name__ == \"__main__\": import pytest import sys", "= self._get_trial_dir(exp_dir) self.assertIsNone(trial.error_file) self.assertEqual(trial.local_dir, exp_dir) self.assertEqual(trial.logdir, abs_trial_dir) self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir) self.assertTrue(os.path.isdir(exp_dir))", "trial_name, abs_trial_dir = self._get_trial_dir( os.path.join(absolute_local_dir, exp_name) ) checkpoint_path = os.path.join(", "trial_dirname, trial_absolute_dir def _train(self, exp_name, local_dir, absolute_local_dir): (trial,) = tune.run(", ").trials self.assertIsNone(trial.error_file) def testDottedRelativePath(self): local_dir = \"./test_dotted_relative_local_dir\" exp_name = self.prefix", "f) return checkpoint_path def load_checkpoint(self, checkpoint_path): with open(checkpoint_path, \"rb\") as", "\"log_level\": \"DEBUG\"}, ).trials self.assertIsNone(trial.error_file) def testDottedRelativePath(self): local_dir = \"./test_dotted_relative_local_dir\" 
# coding: utf-8
import os
import pickle
import shutil
import tempfile
import unittest

import ray
from ray import tune
from ray.rllib import _register_all
from ray.tune import Trainable
from ray.tune.utils import validate_save_restore


class SerialTuneRelativeLocalDirTest(unittest.TestCase):
    local_mode = True
    prefix = "Serial"

    class MockTrainable(Trainable):
        _name = "MockTrainable"

        def setup(self, config):
            self.state = {"hi": 1}

        def step(self):
            return {"timesteps_this_iter": 1, "done": True}

        def save_checkpoint(self, checkpoint_dir):
            checkpoint_path = os.path.join(
                checkpoint_dir, "checkpoint-{}".format(self._iteration)
            )
            with open(checkpoint_path, "wb") as f:
                pickle.dump(self.state, f)
            return checkpoint_path

        def load_checkpoint(self, checkpoint_path):
            with open(checkpoint_path, "rb") as f:
                extra_data = pickle.load(f)
            self.state.update(extra_data)

    def setUp(self):
        self.absolute_local_dir = None
        ray.init(num_cpus=1, num_gpus=0, local_mode=self.local_mode)

    def tearDown(self):
        if self.absolute_local_dir is not None:
            shutil.rmtree(self.absolute_local_dir, ignore_errors=True)
            self.absolute_local_dir = None
        ray.shutdown()
        # Without this line, test_tune_server.testAddTrial would fail.
        _register_all()

    def _get_trial_dir(self, absoulte_exp_dir):
        print("looking for", self.MockTrainable._name)
        print("in", os.listdir(absoulte_exp_dir))
        trial_dirname = next(
            (
                child_dir
                for child_dir in os.listdir(absoulte_exp_dir)
                if (
                    os.path.isdir(os.path.join(absoulte_exp_dir, child_dir))
                    and child_dir.startswith(self.MockTrainable._name)
                )
            )
        )
        trial_absolute_dir = os.path.join(absoulte_exp_dir, trial_dirname)
        return trial_dirname, trial_absolute_dir

    def _train(self, exp_name, local_dir, absolute_local_dir):
        (trial,) = tune.run(
            self.MockTrainable,
            name=exp_name,
            stop={"training_iteration": 1},
            checkpoint_freq=1,
            local_dir=local_dir,
            config={"env": "CartPole-v0", "log_level": "DEBUG"},
        ).trials

        exp_dir = os.path.join(absolute_local_dir, exp_name)
        _, abs_trial_dir = self._get_trial_dir(exp_dir)

        self.assertIsNone(trial.error_file)
        self.assertEqual(trial.local_dir, exp_dir)
        self.assertEqual(trial.logdir, abs_trial_dir)

        self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir)
        self.assertTrue(os.path.isdir(exp_dir))
        self.assertTrue(os.path.isdir(abs_trial_dir))
        self.assertTrue(
            os.path.isfile(
                os.path.join(abs_trial_dir, "checkpoint_000001/checkpoint-1")
            )
        )

    def _restore(self, exp_name, local_dir, absolute_local_dir):
        trial_name, abs_trial_dir = self._get_trial_dir(
            os.path.join(absolute_local_dir, exp_name)
        )

        checkpoint_path = os.path.join(
            local_dir, exp_name, trial_name, "checkpoint_000001/checkpoint-1"
        )  # Relative checkpoint path

        # The file tune would find. The absolute checkpoint path.
        tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path))
        self.assertTrue(
            os.path.isfile(tune_find_file),
            "{} is not exist!".format(tune_find_file),
        )

        (trial,) = tune.run(
            self.MockTrainable,
            name=exp_name,
            stop={"training_iteration": 2},  # train one more iteration.
            restore=checkpoint_path,  # Restore the checkpoint
            config={"env": "CartPole-v0", "log_level": "DEBUG"},
        ).trials
        self.assertIsNone(trial.error_file)

    def testDottedRelativePath(self):
        local_dir = "./test_dotted_relative_local_dir"
        exp_name = self.prefix + "DottedRelativeLocalDir"
        absolute_local_dir = os.path.abspath(local_dir)
        self.absolute_local_dir = absolute_local_dir
        self.assertFalse(os.path.exists(absolute_local_dir))
        self._train(exp_name, local_dir, absolute_local_dir)
        self._restore(exp_name, local_dir, absolute_local_dir)

    def testRelativePath(self):
        local_dir = "test_relative_local_dir"
        exp_name = self.prefix + "RelativePath"
        absolute_local_dir = os.path.abspath(local_dir)
        self.absolute_local_dir = absolute_local_dir
        self.assertFalse(os.path.exists(absolute_local_dir))
        self._train(exp_name, local_dir, absolute_local_dir)
        self._restore(exp_name, local_dir, absolute_local_dir)

    def testTildeAbsolutePath(self):
        local_dir = "~/test_tilde_absolute_local_dir"
        exp_name = self.prefix + "TildeAbsolutePath"
        absolute_local_dir = os.path.abspath(os.path.expanduser(local_dir))
        self.absolute_local_dir = absolute_local_dir
        self.assertFalse(os.path.exists(absolute_local_dir))
        self._train(exp_name, local_dir, absolute_local_dir)
        self._restore(exp_name, local_dir, absolute_local_dir)

    def testTempfile(self):
        local_dir = tempfile.mkdtemp()
        exp_name = self.prefix + "Tempfile"
        self.absolute_local_dir = local_dir
        self._train(exp_name, local_dir, local_dir)
        self._restore(exp_name, local_dir, local_dir)

    def testCheckpointWithNoop(self):
        """Tests that passing the checkpoint_dir right back works."""

        class MockTrainable(Trainable):
            def setup(self, config):
                pass

            def step(self):
                return {"score": 1}

            def save_checkpoint(self, checkpoint_dir):
                with open(os.path.join(checkpoint_dir, "test.txt"), "wb") as f:
                    pickle.dump("test", f)
                return checkpoint_dir

            def load_checkpoint(self, checkpoint_dir):
                with open(os.path.join(checkpoint_dir, "test.txt"), "rb") as f:
                    x = pickle.load(f)
                    assert x == "test"
                return checkpoint_dir

        validate_save_restore(MockTrainable)
        validate_save_restore(MockTrainable, use_object_store=True)


if __name__ == "__main__":
    import pytest
    import sys

    sys.exit(pytest.main(["-v", __file__]))
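
# A usage sketch, not part of the test suite above: it restates what these
# tests verify, namely that a relative local_dir passed to tune.run is
# resolved to an absolute path and that checkpoints land under
# <local_dir>/<exp_name>/<trial_name>/. The experiment name "my_exp" and the
# directory "./my_results" are made-up values for illustration only.
#
#     analysis = tune.run(
#         SerialTuneRelativeLocalDirTest.MockTrainable,
#         name="my_exp",
#         stop={"training_iteration": 1},
#         checkpoint_freq=1,
#         local_dir="./my_results",  # relative; Tune resolves it to an absolute path
#     )
#     print(analysis.trials[0].logdir)  # absolute trial directory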
MANIFEST = {
    "hilt": {
        "h1": {
            "offsets": {"blade": 0, "button": {"x": (8, 9), "y": (110, 111)}},
            "colours": {
                "primary": (216, 216, 216),  # d8d8d8
                "secondary": (141, 141, 141),  # 8d8d8d
                "tertiary": (180, 97, 19),  # b46113
            },
            "length": 24,
            "materials": "Alloy metal/Salvaged materials",
        },
        "h2": {
            "offsets": {"blade": 20, "button": {"x": (8, 8), "y": (100, 105)}},
            "colours": {
                "primary": (112, 112, 112),  # 707070
                "secondary": (0, 0, 0),  # 000000
                "tertiary": (212, 175, 55),  # 000000
            },
            "length": 24,
            "materials": "Alloy metal and carbon composite",
        },
        "h3": {
            "offsets": {"blade": 0, "button": {"x": (10, 10), "y": (100, 118)}},
            "colours": {
                "primary": (157, 157, 157),  # 707070
                "secondary": (0, 0, 0),  # 000000
                "tertiary": (180, 97, 19),  # b46113
            },
            "length": 24,
            "materials": "Alloy metal",
        },
        "h4": {
            "offsets": {"blade": 7, "button": {"x": (8, 9), "y": (92, 100)}},
            "colours": {
                "primary": (0, 0, 0),  # 000000
                "secondary": (157, 157, 157),  # 9d9d9d
                "tertiary": (180, 97, 19),  # b46113
            },
            "length": 13,
            "materials": "Alloy metal",
        },
        "h5": {
            "offsets": {"blade": 0, "button": {"x": (8, 8), "y": (92, 105)}},
            "colours": {
                "primary": (111, 111, 111),  # 6f6f6f
                "secondary": (0, 0, 0),  # 000000
                "tertiary": (180, 97, 19),  # b46113
            },
            "length": 24,
            "materials": "Alloy metal",
        },
        "h6": {
            "offsets": {"blade": 2, "button": {"x": (8, 9), "y": (112, 113)}},
            "colours": {
                "primary": (120, 120, 120),  # 787878
                "secondary": (0, 0, 0),  # 000000
                "tertiary": (180, 97, 19),  # b46113
            },
            "length": 22,
            "materials": "Alloy metal/Salvaged materials",
        },
        "h7": {
            "offsets": {"blade": 0, "button": {"x": (8, 9), "y": (105, 113)}},
            "colours": {
                "primary": (192, 192, 192),  # c0c0c0
                "secondary": (255, 215, 0),  # ffd700
                "tertiary": (0, 0, 0),  # 000000
            },
            "length": 22,
            "materials": "Alloy metal and Gold",
        },
        "h8": {
            "offsets": {"blade": 0, "button": {"x": (8, 9), "y": (100, 111)}},
            "colours": {
                "primary": (216, 216, 216),  # d8d8d8
                "secondary": (180, 97, 19),  # b46113
                "tertiary": (0, 0, 0),  # 000000
            },
            "length": 24,
            "materials": "Alloy metal/Copper",
        },
    },
    "blade": {
        "b1": {"colour": "Red", "crystal": "Adegan crystal", "type": "Sith"},
        "b2": {"colour": "Blue", "crystal": "Zophis crystal", "type": "Jedi"},
        "b3": {"colour": "Green", "crystal": "Nishalorite stone", "type": "Jedi"},
        "b4": {"colour": "Yellow", "crystal": "Kimber stone", "type": "Jedi"},
        "b5": {"colour": "White", "crystal": "Dragite gem", "type": "Jedi"},
        "b6": {"colour": "Purple", "crystal": "Krayt dragon pearl", "type": "Jedi"},
        "b7": {"colour": "Blue/Green", "crystal": "Dantari crystal", "type": "Jedi"},
        "b8": {
            "colour": "Orange",
            "crystal": ["Ilum crystal", "Ultima Pearl"],
            "type": "Sith",
        },
        "b9": {
            "colour": "Black",
            "crystal": "Obsidian",
            "type": ["Jedi", "Mandalorian"],
        },
    },
    "pommel": {
        "p1": {"length": 5,},
        "p2": {"length": 14,},
        "p3": {"length": 3,},
        "p4": {"length": 8,},
        "p5": {"length": 5,},
        "p6": {"length": 5,},
        "p7": {"length": 8,},
    },
    # These are lightsabers for a specific Jedi or Sith. Should use their name
    # instead of
    "unique_urls": {""},
}
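
# A small lookup sketch, assuming only the MANIFEST layout above; the helper
# name and the ids used in the example are illustrative, not part of the
# original manifest.
def describe_saber(hilt_id, blade_id, pommel_id):
    hilt = MANIFEST["hilt"][hilt_id]
    blade = MANIFEST["blade"][blade_id]
    pommel = MANIFEST["pommel"][pommel_id]
    return {
        "blade_colour": blade["colour"],
        "crystal": blade["crystal"],
        "wielder_type": blade["type"],
        "hilt_materials": hilt["materials"],
        # hilt and pommel entries are the only parts that carry a length
        "combined_length": hilt["length"] + pommel["length"],
    }


# Example: describe_saber("h1", "b2", "p1") pairs a Blue Zophis-crystal Jedi
# blade with an "Alloy metal/Salvaged materials" hilt, combined length 29.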
from tests.tensor.test_basic import (
    TestAlloc,
    TestComparison,
    TestJoinAndSplit,
    TestReshape,
)
from tests.tensor.utils import rand
from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise
from theano.gpuarray.subtensor import GpuSubtensor
from theano.tensor import TensorType
from theano.tensor.basic import alloc

pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray

utt.seed_rng()
rng = np.random.RandomState(seed=utt.fetch_seed())


def test_transfer_cpu_gpu():
    a = tt.fmatrix("a")
    g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")

    av = np.asarray(rng.rand(5, 4), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))

    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    fv = f(av)
    assert GpuArrayType.values_eq(fv, gv)

    f = theano.function([g], host_from_gpu(g))
    fv = f(gv)
    assert np.all(fv == av)


def test_transfer_gpu_gpu():
    g = GpuArrayType(
        dtype="float32", broadcastable=(False, False), context_name=test_ctx_name
    )()

    av = np.asarray(rng.rand(5, 4), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))
    mode = mode_with_gpu.excluding(
        "cut_gpua_host_transfers", "local_cut_gpua_host_gpua"
    )
    f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, GpuToGpu)
    fv = f(gv)
    assert GpuArrayType.values_eq(fv, gv)


def test_transfer_strided():
    # This is just to ensure that it works in theano
    # libgpuarray has a much more comprehensive suit of tests to
    # ensure correctness
    a = tt.fmatrix("a")
    g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")

    av = np.asarray(rng.rand(5, 8), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))

    av = av[:, ::2]
    gv = gv[:, ::2]

    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    fv = f(av)
    assert GpuArrayType.values_eq(fv, gv)

    f = theano.function([g], host_from_gpu(g))
    fv = f(gv)
    assert np.all(fv == av)


def gpu_alloc_expected(x, *shp):
    g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name))
    g[:] = x
    return g


TestGpuAlloc = makeTester(
    name="GpuAllocTester",
    # The +1 is there to allow the lift to the GPU.
    op=lambda *args: alloc(*args) + 1,
    gpu_op=GpuAlloc(test_ctx_name),
    cases=dict(
        correct01=(rand(), np.int32(7)),
        # just gives a DeepCopyOp with possibly wrong results on the CPU
        # correct01_bcast=(rand(1), np.int32(7)),
        correct02=(rand(), np.int32(4), np.int32(7)),
        correct12=(rand(7), np.int32(4), np.int32(7)),
        correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)),
        correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)),
        bad_shape12=(rand(7), np.int32(7), np.int32(5)),
    ),
)


class TestGPUAlloc(TestAlloc):
    dtype = "float32"
    mode = mode_with_gpu
    shared = staticmethod(gpuarray_shared_constructor)
    allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()]


def test_alloc_empty():
    for dt in ["float32", "int8"]:
        f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3))
        assert len(f.maker.fgraph.apply_nodes) == 1
        out = f()
        assert out.shape == (2, 3)
        assert out.dtype == dt

    f = theano.function(
        [],
        [
            GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
            GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
        ],
    )
    out = f()
    assert out[0].shape == (3, 2)
    assert out[0].dtype == "uint64"
    assert out[1].shape == (3, 2)
    assert out[1].dtype == "uint64"
    assert (
        len(
            [
                node
                for node in f.maker.fgraph.apply_nodes
                if isinstance(node.op, GpuAllocEmpty)
            ]
        )
        == 1
    )


def test_shape():
    x = GpuArrayType(dtype="float32", broadcastable=[False, False, False])()
    v = gpuarray.zeros((3, 4, 5), dtype="float32", context=get_context(test_ctx_name))
    f = theano.function([x], x.shape)
    topo = f.maker.fgraph.toposort()
    assert np.all(f(v) == (3, 4, 5))
    if theano.config.mode != "FAST_COMPILE":
        assert len(topo) == 4
        assert isinstance(topo[0].op, tt.opt.Shape_i)
        assert isinstance(topo[1].op, tt.opt.Shape_i)
        assert isinstance(topo[2].op, tt.opt.Shape_i)
        assert isinstance(topo[3].op, tt.opt.MakeVector)
    mode = mode_with_gpu.excluding("local_shape_to_shape_i")
    f = theano.function([x], x.shape, mode=mode)
    topo = f.maker.fgraph.toposort()
    assert np.all(f(v) == (3, 4, 5))
    assert len(topo) == 1
    assert isinstance(topo[0].op, tt.Shape)


def test_gpujoin_gpualloc():
    a = tt.fmatrix("a")
    a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
    b = tt.fmatrix("b")
    b_val = np.asarray(np.random.rand(3, 5), dtype="float32")

    f = theano.function(
        [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_without_gpu
    )
    f_gpu = theano.function(
        [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu
    )
    f_gpu2 = theano.function(
        [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu
    )
    assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == tt.join_ for node in f.maker.fgraph.toposort()]) == 1
    assert (
        sum([isinstance(node.op, GpuAlloc) for node in f_gpu.maker.fgraph.toposort()])
        == 2
    )
    assert sum([node.op == gpu_join for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert (
        sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()])
        == 2
    )
    assert sum([node.op == gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))


def test_gpu_tril_triu():
    def check_l(m, k=0):
        m_symb = tt.matrix(dtype=m.dtype)
        k_symb = tt.iscalar()
        f = theano.function(
            [m_symb, k_symb], tt.tril(m_symb, k_symb), mode=mode_with_gpu
        )
        result = f(m, k)
        assert np.allclose(result, np.tril(m, k))
        assert result.dtype == np.dtype(dtype)
        assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])

    def check_u(m, k=0):
        m_symb = tt.matrix(dtype=m.dtype)
        k_symb = tt.iscalar()
        f = theano.function(
            [m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu
        )
        result = f(m, k)
        assert np.allclose(result, np.triu(m, k))
        assert result.dtype == np.dtype(dtype)
        assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])

    utt.seed_rng()
    test_rng = np.random.RandomState(seed=utt.fetch_seed())

    for dtype in ["float64", "float32", "float16"]:
        # try a big one
        m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype)
        check_l(m, 0)
        check_l(m, 1)
        check_l(m, -1)

        check_u(m, 0)
        check_u(m, 1)
        check_u(m, -1)

        m = np.asarray(test_rng.rand(10, 5) * 2 - 1, dtype=dtype)
        check_l(m, 0)
        check_l(m, 1)
        check_l(m, -1)

        check_u(m, 0)
        check_u(m, 1)
        check_u(m, -1)


def test_gputri():
    def check(dtype, N, M_=None, k=0):
        M = M_
        # Currently DebugMode does not support None as inputs even if this is
        # allowed.
        if M is None:
            M = N
        N_symb = tt.iscalar()
        M_symb = tt.iscalar()
        k_symb = tt.iscalar()
        out = tt.tri(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
        f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
        result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
        assert np.allclose(result, np.tri(N, M_, k, dtype=dtype))
        assert result.dtype == np.dtype(dtype)
        assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])

    for dtype in ["float64", "float32", "int32", "float16"]:
        # try a big one
        check(dtype, 1000, 1000, 0)
        check(dtype, 1000, 1000, -400)
        check(dtype, 1000, 1000, 400)

        check(dtype, 5)
        # M != N, k = 0
        check(dtype, 3, 5)
        check(dtype, 5, 3)
        # N == M, k != 0
        check(dtype, 3, 3, 1)
        check(dtype, 3, 3, -1)
        # N < M, k != 0
        check(dtype, 3, 5, 1)
        check(dtype, 3, 5, -1)
        # N > M, k != 0
        check(dtype, 5, 3, 1)
        check(dtype, 5, 3, -1)
        # k > M, -k > N
        check(dtype, 5, 3, 3)
        check(dtype, 3, 5, 3)
        check(dtype, 5, 3, -3)
        check(dtype, 3, 5, -3)
"f_gpu2.maker.fgraph.toposort()]) == 1 assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val)) def test_gpueye():", "assert out.shape == (2, 3) assert out.dtype == dt f", "Test that the shape is lifted over hostfromgpu m =", "inputs even if this is # allowed. if M is", "isinstance(topo[1].op, theano.compile.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(cv)) == (5, 4)", "topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert isinstance(topo[0].op, GpuToGpu)", "GpuFromHost, theano.compile.DeepCopyOp, GpuDimShuffle, GpuElemwise, tt.opt.Shape_i, tt.opt.MakeVector, ) assert self.op ==", "mode=self.mode) assert any( [ isinstance(node.op, self.split_op_class) for node in f.maker.fgraph.toposort()", "assert out[0].dtype == \"uint64\" assert out[1].shape == (3, 2) assert", "dtype=\"float32\") # The reshape is needed otherwise we make the", "over hostfromgpu m = mode_with_gpu.including( \"local_dot_to_dot22\", \"local_dot22_to_dot22scalar\", \"specialize\" ) a", "assert isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(av)) == (5, 4) f =", "== av) def gpu_alloc_expected(x, *shp): g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name))", "assert f(a_val, 2).flags.c_contiguous class TestGPUReshape(TestReshape): def setup_method(self): self.shared = gpuarray_shared_constructor", "check in self.checks.items(): assert check(inputs, variables), ( \"Test %s::%s: Failed", "tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(av)) == (5, 4) f", "the \" \"Function\" ) % (self.gpu_op, testname) exc.args += (err_msg,)", "== (3, 4, 5)) assert len(topo) == 1 assert isinstance(topo[0].op,", "assert tuple(f(av)) == (5, 4) f = theano.function([ca], host_from_gpu(ca), mode=m)", "= exc try: variables = f_tst() except Exception as exc:", "dtype=\"float32\") cv = gpuarray.asarray( np.random.rand(5, 4), dtype=\"float32\", context=get_context(test_ctx_name) ) f", "join = GpuJoin(view=0) c = join(0, x, z) f =", "subtensor on the CPU # to transfer less data. f", "isinstance(node.op, GpuAllocEmpty) ] ) == 1 ) def test_shape(): x", "a big one m = np.asarray(test_rng.rand(5000, 5000) * 2 -", "theano import theano.tensor as tt # Don't import test classes", "for node in f.maker.fgraph.toposort()]) for dtype in [\"float64\", \"float32\", \"int32\",", "\"local_cut_gpua_host_gpua\" ) f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode) topo = f.maker.fgraph.toposort()", "x.get_value(borrow=True, return_internal_type=True) is f(0) assert np.allclose(f(0), [3, 4, 5]) def", "CPU # to transfer less data. f = theano.function( [a,", "continue def rand_gpuarray(*shape, **kwargs): r = rng.rand(*shape) * 2 -", "inplace. 
# # This function tests the case when several", "3) assert out.dtype == dt f = theano.function( [], [", "test_gpueye(): def check(dtype, N, M_=None, k=0): # Theano does not", "err_msg = ( \"Test %s::%s: exception raised during test \"", "f.maker.fgraph.toposort() assert isinstance(topo[0].op, tt.opt.Shape_i) assert isinstance(topo[1].op, tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector)", "1 out = f() assert out.shape == (2, 3) assert", "= tt.fmatrix(\"a\") i = tt.iscalar(\"i\") a_val = np.asarray(np.random.rand(4, 5), dtype=\"float32\")", "= mode_with_gpu.excluding(\"constant_folding\") self.join_op = GpuJoin() self.split_op_class = GpuSplit # Use", "= ( HostFromGpu, GpuFromHost, theano.compile.DeepCopyOp, GpuDimShuffle, GpuElemwise, tt.opt.Shape_i, tt.opt.MakeVector, )", "\"local_dot_to_dot22\", \"local_dot22_to_dot22scalar\", \"specialize\" ) a = tt.fmatrix(\"a\") ca = theano.gpuarray.type.GpuArrayType(\"float32\",", "result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype) assert np.allclose(result, np.eye(N,", "computation at the same time. rng = np.random.RandomState(seed=utt.fetch_seed()) m =", "assert np.allclose(result, np.tril(m, k)) assert result.dtype == np.dtype(dtype) assert any([isinstance(node.op,", "them are empty. In this case # Gpujoin should work", "TestGPUComparison(TestComparison): def setup_method(self): utt.seed_rng() self.mode = mode_with_gpu self.shared = gpuarray_shared_constructor", "\" \"Function\" ) % (self.gpu_op, testname) exc.args += (err_msg,) raise", "testname, inputs in cases.items(): for _ in range(len(inputs)): if type(inputs[_])", "def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs): from theano.tensor.sharedvar import scalar_constructor,", "(dtype %s).\" % ( self.op, testname, i, inputs, expected, expected.dtype,", "%s gave the wrong \" \"value. With inputs %s, expected", "case when several elements are passed to the # Gpujoin", "None as inputs even if this is # allowed. if", "np.int32(2), np.int32(4), np.int32(7)), bad_shape12=(rand(7), np.int32(7), np.int32(5)), ), ) class TestGPUAlloc(TestAlloc):", "v = gpuarray.zeros((3, 4, 5), dtype=\"float32\", context=get_context(test_ctx_name)) f = theano.function([x],", "is # allowed. if M is None: M = N", "m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype) check_l(m,", "argument %s\", list(kwargs.keys())[0]) return gpuarray.array(r, dtype=dtype, cls=cls, context=get_context(test_ctx_name)) def makeTester(", "1000, 0) check(dtype, 1000, 1000, -400) check(dtype, 1000, 1000, 400)", "-1) check_u(m, 0) check_u(m, 1) check_u(m, -1) def test_gputri(): def", "4) f = theano.function([ca], host_from_gpu(ca), mode=m) assert host_from_gpu in [x.op", "gpuarray.array(av, context=get_context(test_ctx_name)) f = theano.function([a], GpuFromHost(test_ctx_name)(a)) fv = f(av) assert", "a big one check(dtype, 1000, 1000, 0) check(dtype, 1000, 1000,", "in f.maker.fgraph.toposort()]) for dtype in [\"float32\", \"int32\", \"float16\"]: check(dtype, 3)", "exc try: variables = f_tst() except Exception as exc: if", "def setup_method(self): self.shared = gpuarray_shared_constructor self.op = GpuReshape self.mode =", "= GpuJoin() self.split_op_class = GpuSplit # Use join instead of", "tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.Shape_i) assert isinstance(topo[3].op, tt.opt.MakeVector) mode = mode_with_gpu.excluding(\"local_shape_to_shape_i\")", "Gpujoin to work inplace. 
# # This function tests the", "test_transfer_gpu_gpu(): g = GpuArrayType( dtype=\"float32\", broadcastable=(False, False), context_name=test_ctx_name )() av", "a_val = np.asarray(np.random.rand(4, 5), dtype=\"float32\") # The reshape is needed", "0) check_u(m, 1) check_u(m, -1) def test_gputri(): def check(dtype, N,", "node_tst.outputs, mode=mode_gpu) except Exception as exc: err_msg = ( \"Test", "%s::%s: Error occurred while trying to \" \"make a Function\"", "], ) out = f() assert out[0].shape == (3, 2)", "float: inputs[_] = np.asarray(inputs[_], dtype=theano.config.floatX) self.run_case(testname, inputs) def run_case(self, testname,", "assert isinstance(topo[0].op, tt.Shape) def test_gpu_contiguous(): a = tt.fmatrix(\"a\") i =", "% (self.gpu_op, testname, type(exc), type(ref_e)) ) exc.args += (err_msg,) raise", "theano.function([ca], host_from_gpu(ca).shape, mode=m) topo = f.maker.fgraph.toposort() assert isinstance(topo[0].op, theano.compile.Shape_i) assert", "borrow=True) z = tt.zeros((s,)) join = GpuJoin(view=0) c = join(0,", "Exception as exc: ref_e = exc try: variables = f_tst()", "False))() av = np.asarray(np.random.rand(5, 4), dtype=\"float32\") cv = gpuarray.asarray( np.random.rand(5,", "GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3)) assert len(f.maker.fgraph.apply_nodes) == 1 out = f()", "in f.maker.fgraph.toposort() ] ) o1, o2 = f() assert np.allclose(o1,", "out[1].shape == (3, 2) assert out[1].dtype == \"uint64\" assert (", "( GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu,", "mode_gpu=mode_with_gpu, mode_nogpu=mode_without_gpu, skip=False, eps=1e-10, ): if checks is None: checks", "= np.array([3, 4, 5], dtype=theano.config.floatX) x = gpuarray_shared_constructor(data, borrow=True) z", "context=get_context(test_ctx_name)) def makeTester( name, op, gpu_op, cases, checks=None, mode_gpu=mode_with_gpu, mode_nogpu=mode_without_gpu,", "5, 3, -3) check(dtype, 3, 5, -3) check(dtype, 5, 3,", "TestGPUReshape(TestReshape): def setup_method(self): self.shared = gpuarray_shared_constructor self.op = GpuReshape self.mode", "raised during test \" \"call was not the same as", "self.mode = mode_with_gpu self.shared = gpuarray_shared_constructor self.dtypes = [\"float64\", \"float32\"]", "big one check(dtype, 1000, 1000, 0) check(dtype, 1000, 1000, -400)", "correct02=(rand(), np.int32(4), np.int32(7)), correct12=(rand(7), np.int32(4), np.int32(7)), correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)),", "= tt.iscalar() M_symb = tt.iscalar() k_symb = tt.iscalar() out =", "% (self.gpu_op, testname) exc.args += (err_msg,) raise else: # if", "testname, description, inputs, variables) Checker.__name__ = name if hasattr(Checker, \"__qualname__\"):", "isinstance(topo[2].op, tt.opt.Shape_i) assert isinstance(topo[3].op, tt.opt.MakeVector) mode = mode_with_gpu.excluding(\"local_shape_to_shape_i\") f =", "g = GpuArrayType(dtype=\"float32\", broadcastable=(False, False))(\"g\") av = np.asarray(rng.rand(5, 4), dtype=\"float32\")", "[GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()] def test_alloc_empty(): for dt in [\"float32\", \"int8\"]:", "3, 5) check(dtype, 5, 3) # N == M, k", "variable.dtype, ) ) for description, check in self.checks.items(): assert check(inputs,", "in topo]) assert f(a_val, 1).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous assert f(a_val,", "- 1, dtype=dtype) check_l(m, 0) check_l(m, 1) check_l(m, -1) check_u(m,", "= [theano.shared(inp) for inp in inputs] try: node_ref = safe_make_node(self.op,", 
"# Test that the shape is lifted over hostfromgpu m", "k != 0 check(dtype, 3, 3, 1) check(dtype, 3, 3,", "for dtype in [\"float64\", \"float32\", \"float16\"]: # try a big", "check_u(m, 0) check_u(m, 1) check_u(m, -1) def test_gputri(): def check(dtype,", "def test_alloc_empty(): for dt in [\"float32\", \"int8\"]: f = theano.function([],", "tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu ) f_gpu2 = theano.function( [a, b], tt.join(0,", "# Theano does not accept None as a tensor. #", "::2] f = theano.function([a], GpuFromHost(test_ctx_name)(a)) fv = f(av) assert GpuArrayType.values_eq(fv,", "6) check(dtype, 3, 5, -6) def test_hostfromgpu_shape_i(): # Test that", "check_u(m, 1) check_u(m, -1) def test_gputri(): def check(dtype, N, M_=None,", "== (3, 2) assert out[0].dtype == \"uint64\" assert out[1].shape ==", "tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu ) assert sum([node.op ==", "GpuFromHost(test_ctx_name)(a)) fv = f(av) assert GpuArrayType.values_eq(fv, gv) f = theano.function([g],", "0: raise TypeError(\"Unexpected argument %s\", list(kwargs.keys())[0]) return gpuarray.array(r, dtype=dtype, cls=cls,", "= GpuArrayType( dtype=\"float32\", broadcastable=(False, False), context_name=test_ctx_name )() av = np.asarray(rng.rand(5,", "(self.op, testname, description, inputs, variables) Checker.__name__ = name if hasattr(Checker,", "support None as inputs even if this is # allowed.", "b_val), f_gpu2(a_val, b_val)) def test_gpueye(): def check(dtype, N, M_=None, k=0):", "= mode_with_gpu.including( \"local_dot_to_dot22\", \"local_dot22_to_dot22scalar\", \"specialize\" ) a = tt.fmatrix(\"a\") ca", "mode is None: mode = mode_with_gpu return theano.function( inputs, outputs,", "cases.items(): for _ in range(len(inputs)): if type(inputs[_]) is float: inputs[_]", "\" \"call was not the same as the reference \"", "host_from_gpu in [x.op for x in f.maker.fgraph.toposort()] f = theano.function([ca],", "f(a_val, 2).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous class TestGPUReshape(TestReshape): def setup_method(self): self.shared", "f_tst() except Exception as exc: if ref_e is None: err_msg", "GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu,", "3, -3) check(dtype, 3, 5, -3) check(dtype, 5, 3, 6)", "for node in f_gpu2.maker.fgraph.toposort()]) == 1 assert np.allclose(f(a_val, b_val), f_gpu2(a_val,", "mode_with_gpu return theano.function( inputs, outputs, mode=mode, allow_input_downcast=allow_input_downcast, accept_inplace=True, on_unused_input=on_unused_input, name=name,", "GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu, ) from", "= f() assert np.allclose(o1, m.get_value(borrow=True)[:2]) assert np.allclose(o2, m.get_value(borrow=True)[2:]) def test_gpujoin_gpualloc():", "# N < M, k != 0 check(dtype, 3, 5,", "topo = f.maker.fgraph.toposort() assert isinstance(topo[0].op, theano.compile.Shape_i) assert isinstance(topo[1].op, theano.compile.Shape_i) assert", "node in f.maker.fgraph.apply_nodes if isinstance(node.op, GpuAllocEmpty) ] ) == 1", "even if this is # allowed. 
if M is None:", "f(gv) assert np.all(fv == av) def test_transfer_gpu_gpu(): g = GpuArrayType(", "+ 4, mode=mode_with_gpu ) assert sum([node.op == tt.alloc for node", "\"specialize\" ) a = tt.fmatrix(\"a\") ca = theano.gpuarray.type.GpuArrayType(\"float32\", (False, False))()", "f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m) topo = f.maker.fgraph.toposort() assert isinstance(topo[0].op,", "return gpuarray_shared_constructor(x, target=test_ctx_name, **kwargs) self.shared = shared def test_gpusplit_opt(self): #", "assert ( sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()]) == 2", "): if checks is None: checks = {} _op =", "o = tt.Split(2)(m, 0, [2, 2]) assert o[0].dtype == \"float16\"", "assert x.get_value(borrow=True, return_internal_type=True) is f(0) assert np.allclose(f(0), [3, 4, 5])", "check(dtype, 3, 5, -6) def test_hostfromgpu_shape_i(): # Test that the", "def shared(x, **kwargs): return gpuarray_shared_constructor(x, target=test_ctx_name, **kwargs) self.shared = shared", "= GpuReshape self.mode = mode_with_gpu self.ignore_topo = ( HostFromGpu, GpuFromHost,", "== GpuReshape class TestGPUComparison(TestComparison): def setup_method(self): utt.seed_rng() self.mode = mode_with_gpu", "assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val)) def test_gpueye(): def check(dtype, N,", "tt.iscalar() k_symb = tt.iscalar() out = tt.eye(N_symb, M_symb, k_symb, dtype=dtype)", "tt.fmatrix(\"a\") a_val = np.asarray(np.random.rand(4, 5), dtype=\"float32\") b = tt.fmatrix(\"b\") b_val", "= op _gpu_op = gpu_op _cases = cases _skip =", "assert np.allclose(f(0), [3, 4, 5]) def test_gpu_tril_triu(): def check_l(m, k=0):", "import GpuSubtensor from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor from theano.tensor", "isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(av)) == (5, 4) f = theano.function([ca],", "4) def test_Gpujoin_inplace(): # Test Gpujoin to work inplace. #", "to the GPU. op=lambda *args: alloc(*args) + 1, gpu_op=GpuAlloc(test_ctx_name), cases=dict(", "f_gpu = theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu )", "target=test_ctx_name, **kwargs) self.shared = shared def test_gpusplit_opt(self): # Test that", "tt.fmatrix(\"b\") b_val = np.asarray(np.random.rand(3, 5), dtype=\"float32\") f = theano.function( [a,", "= np.asarray(inputs[_], dtype=theano.config.floatX) self.run_case(testname, inputs) def run_case(self, testname, inputs): inputs_ref", "tt.opt.Shape_i) assert isinstance(topo[1].op, tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.Shape_i) assert isinstance(topo[3].op, tt.opt.MakeVector)", "self.checks.items(): assert check(inputs, variables), ( \"Test %s::%s: Failed check: %s", "GpuArrayType( dtype=\"float32\", broadcastable=(False, False), context_name=test_ctx_name )() av = np.asarray(rng.rand(5, 4),", "in cases.items(): for _ in range(len(inputs)): if type(inputs[_]) is float:", "expected) in enumerate(zip(variables, expecteds)): condition = ( variable.dtype != expected.dtype", "( \"Test %s::%s: exception when calling the \" \"Function\" )", "libgpuarray has a much more comprehensive suit of tests to", "5, 3) check(dtype, 5, 3, -3) check(dtype, 3, 5, -3)", "in [\"float64\", \"float32\", \"int32\", \"float16\"]: # try a big one", "to allow the lift to the GPU. 
op=lambda *args: alloc(*args)", "%s \" \"(dtype %s), got %s (dtype %s).\" % (", "5, -3) check(dtype, 5, 3, 6) check(dtype, 3, 5, -6)", "This function tests the case when several elements are passed", "devices self.floatX = \"float32\" self.hide_error = theano.config.mode not in [\"DebugMode\",", "0 check(dtype, 3, 5) check(dtype, 5, 3) # N ==", "theano.function([x], x.shape, mode=mode) topo = f.maker.fgraph.toposort() assert np.all(f(v) == (3,", "sum([node.op == gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1 assert", ") % (self.gpu_op, testname, inputs) exc.args += (err_msg,) raise try:", "in f_gpu2.maker.fgraph.toposort()]) == 2 ) assert sum([node.op == gpu_join for", "# ensure correctness a = tt.fmatrix(\"a\") g = GpuArrayType(dtype=\"float32\", broadcastable=(False,", "2 ) assert sum([node.op == gpu_join for node in f_gpu.maker.fgraph.toposort()])", "tt.fmatrix(\"a\") ca = theano.gpuarray.type.GpuArrayType(\"float32\", (False, False))() av = np.asarray(np.random.rand(5, 4),", "testname) exc.args += (err_msg,) raise else: # if we raised", "context=get_context(test_ctx_name)) f = theano.function([x], x.shape) topo = f.maker.fgraph.toposort() assert np.all(f(v)", "= gpuarray_shared_constructor self.op = GpuReshape self.mode = mode_with_gpu self.ignore_topo =", "node in topo]) assert any([isinstance(node.op, GpuContiguous) for node in topo])", "pytest import theano import theano.tensor as tt # Don't import", "k > N check(dtype, 5, 3, 3) check(dtype, 3, 5,", "for node in topo]) assert any([isinstance(node.op, GpuContiguous) for node in", "1000, 1000, 0) check(dtype, 1000, 1000, -400) check(dtype, 1000, 1000,", "f = theano.function([g], host_from_gpu(g)) fv = f(gv) assert np.all(fv ==", "cv = gpuarray.asarray( np.random.rand(5, 4), dtype=\"float32\", context=get_context(test_ctx_name) ) f =", "M, k != 0 check(dtype, 3, 5, 1) check(dtype, 3,", "test_rng = np.random.RandomState(seed=utt.fetch_seed()) for dtype in [\"float64\", \"float32\", \"float16\"]: #", "None: M = N N_symb = tt.iscalar() M_symb = tt.iscalar()", "Exception as exc: if ref_e is None: err_msg = (", "they get tested as part of the file from tests", "= inplace_func([], node_tst.outputs, mode=mode_gpu) except Exception as exc: err_msg =", "out = f() assert out[0].shape == (3, 2) assert out[0].dtype", "%s::%s: exception when calling the \" \"Function\" ) % (self.gpu_op,", "to \" \"make a Function\" ) % (self.gpu_op, testname) exc.args", "f_gpu2(a_val, b_val)) def test_gpueye(): def check(dtype, N, M_=None, k=0): #", "f.maker.fgraph.toposort()]) for dtype in [\"float64\", \"float32\", \"int32\", \"float16\"]: # try", "f.maker.fgraph.toposort()]) utt.seed_rng() test_rng = np.random.RandomState(seed=utt.fetch_seed()) for dtype in [\"float64\", \"float32\",", "dtype=dtype, cls=cls, context=get_context(test_ctx_name)) def makeTester( name, op, gpu_op, cases, checks=None,", "[ isinstance(node.op, self.split_op_class) for node in f.maker.fgraph.toposort() ] ) o1,", "safe_make_node from theano.gpuarray.basic_ops import ( GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost,", "one check(dtype, 1000, 1000, 0) check(dtype, 1000, 1000, -400) check(dtype,", "inplace and the output should be the view of the", "isinstance(topo[1].op, tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(av)) == (5, 4)", "theano.function( [], [ GpuAllocEmpty(\"uint64\", test_ctx_name)(3, 2), GpuAllocEmpty(\"uint64\", test_ctx_name)(3, 2), ],", "g = GpuArrayType( dtype=\"float32\", broadcastable=(False, False), 
context_name=test_ctx_name )() av =", "gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name)) g[:] = x return g TestGpuAlloc =", "GpuSubtensor) for node in topo]) assert any([isinstance(node.op, GpuContiguous) for node", "assert np.all(fv == av) def gpu_alloc_expected(x, *shp): g = gpuarray.empty(shp,", "the shape is lifted over hostfromgpu m = mode_with_gpu.including( \"local_dot_to_dot22\",", "= theano.function([], o, mode=self.mode) assert any( [ isinstance(node.op, self.split_op_class) for", "6).astype(\"float16\")) o = tt.Split(2)(m, 0, [2, 2]) assert o[0].dtype ==", "name=name, strict=strict, allow_downcast=allow_downcast, **kwargs ) except TypeError: continue def rand_gpuarray(*shape,", "3, -1) # N < M, k != 0 check(dtype,", "(self.gpu_op, testname, type(exc), type(ref_e)) ) exc.args += (err_msg,) raise for", "3, -1) # k > M, -k > N, k", "testname, type(exc), type(ref_e)) ) exc.args += (err_msg,) raise for i,", "0 check(dtype, 3, 3, 1) check(dtype, 3, 3, -1) #", "if type(inputs[_]) is float: inputs[_] = np.asarray(inputs[_], dtype=theano.config.floatX) self.run_case(testname, inputs)", "3) # M != N, k = 0 check(dtype, 3,", "are passed to the # Gpujoin function but all except", "False, False])() v = gpuarray.zeros((3, 4, 5), dtype=\"float32\", context=get_context(test_ctx_name)) f", "( \"Test %s::%s: Error occurred while making \" \"a node", "(err_msg,) raise self.assertFunctionContains1(f_tst, self.gpu_op) ref_e = None try: expecteds =", "if theano.config.mode != \"FAST_COMPILE\": assert len(topo) == 4 assert isinstance(topo[0].op,", "allowed. if M is None: M = N N_symb =", "= GpuJoin(view=0) c = join(0, x, z) f = theano.function([s],", "= f(m, k) assert np.allclose(result, np.tril(m, k)) assert result.dtype ==", "except Exception as exc: if ref_e is None: err_msg =", "np.random.RandomState(seed=utt.fetch_seed()) def inplace_func( inputs, outputs, mode=None, allow_input_downcast=False, on_unused_input=\"raise\", name=None, ):", "= np.random.RandomState(seed=utt.fetch_seed()) def inplace_func( inputs, outputs, mode=None, allow_input_downcast=False, on_unused_input=\"raise\", name=None,", "the reference \" \"call (got: %s, expected %s)\" % (self.gpu_op,", "assert isinstance(topo[1].op, tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.Shape_i) assert isinstance(topo[3].op, tt.opt.MakeVector) mode", "the view of the # non-empty element. s = tt.lscalar()", "\"call (got: %s, expected %s)\" % (self.gpu_op, testname, type(exc), type(ref_e))", "None as a tensor. # So we must use a", "a = tt.fmatrix(\"a\") g = GpuArrayType(dtype=\"float32\", broadcastable=(False, False))(\"g\") av =", "GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu,", "mode_nogpu=mode_without_gpu, skip=False, eps=1e-10, ): if checks is None: checks =", "\"value. 
With inputs %s, expected %s \" \"(dtype %s), got", "== 1 assert isinstance(topo[0].op, tt.Shape) def test_gpu_contiguous(): a = tt.fmatrix(\"a\")", "check(dtype, 5, 3, 6) check(dtype, 3, 5, -6) def test_hostfromgpu_shape_i():", "[ node for node in f.maker.fgraph.apply_nodes if isinstance(node.op, GpuAllocEmpty) ]", "**kwargs) self.shared = shared def test_gpusplit_opt(self): # Test that we", "= np.asarray(rng.rand(5, 4), dtype=\"float32\") gv = gpuarray.array(av, context=get_context(test_ctx_name)) mode =", "node in topo]) assert f(a_val, 1).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous assert", "context=get_context(test_ctx_name)) g[:] = x return g TestGpuAlloc = makeTester( name=\"GpuAllocTester\",", "fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs): from theano.tensor.sharedvar import scalar_constructor, tensor_constructor", "def makeTester( name, op, gpu_op, cases, checks=None, mode_gpu=mode_with_gpu, mode_nogpu=mode_without_gpu, skip=False,", "0 check(dtype, 5, 3, 1) check(dtype, 5, 3, -1) #", "GpuJoin() # this is to avoid errors with limited devices", "ref_e = exc try: variables = f_tst() except Exception as", "gpuarray_shared_constructor from theano.tensor import TensorType from theano.tensor.basic import alloc pygpu", "shared(x, **kwargs): return gpuarray_shared_constructor(x, target=test_ctx_name, **kwargs) self.shared = shared def", "GpuElemwise from theano.gpuarray.subtensor import GpuSubtensor from theano.gpuarray.type import GpuArrayType, get_context,", "result = f(m, k) assert np.allclose(result, np.triu(m, k)) assert result.dtype", "mode=mode_with_gpu ) assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()])", "3) check(dtype, 3, 5, 3) check(dtype, 5, 3, -3) check(dtype,", "is needed otherwise we make the subtensor on the CPU", "len(kwargs) != 0: raise TypeError(\"Unexpected argument %s\", list(kwargs.keys())[0]) return gpuarray.array(r,", "tuple(f(cv)) == (5, 4) def test_Gpujoin_inplace(): # Test Gpujoin to", "\"float16\"]: check(dtype, 3) # M != N, k = 0", "0) check(dtype, 1000, 1000, -400) check(dtype, 1000, 1000, 400) check(dtype,", "ensure correctness a = tt.fmatrix(\"a\") g = GpuArrayType(dtype=\"float32\", broadcastable=(False, False))(\"g\")", "= [theano.shared(inp) for inp in inputs] inputs_tst = [theano.shared(inp) for", "GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu, )", "TestJoinAndSplit, TestReshape, ) from tests.tensor.utils import rand, safe_make_node from theano.gpuarray.basic_ops", "dtype=x.dtype, context=get_context(test_ctx_name)) g[:] = x return g TestGpuAlloc = makeTester(", "expected.shape or not TensorType.values_eq_approx(variable, expected) ) assert not condition, (", "alloc pygpu = pytest.importorskip(\"pygpu\") gpuarray = pygpu.gpuarray utt.seed_rng() rng =", "in inputs] inputs_tst = [theano.shared(inp) for inp in inputs] try:", "%s::%s: exception raised during test \" \"call was not the", "isinstance(topo[1].op, tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.Shape_i) assert isinstance(topo[3].op, tt.opt.MakeVector) mode =", "\" \"value. 
With inputs %s, expected %s \" \"(dtype %s),", "dtype=\"float32\", context=get_context(test_ctx_name)) f = theano.function([x], x.shape) topo = f.maker.fgraph.toposort() assert", "# Currently DebugMode does not support None as inputs even", "import alloc pygpu = pytest.importorskip(\"pygpu\") gpuarray = pygpu.gpuarray utt.seed_rng() rng", "in (gpuarray_shared_constructor, tensor_constructor, scalar_constructor): try: return c( value, name=name, strict=strict,", "\" \"a node with inputs %s\" ) % (self.gpu_op, testname,", "if isinstance(exc, type(ref_e)): return else: err_msg = ( \"Test %s::%s:", "GpuTri) for node in f.maker.fgraph.toposort()]) for dtype in [\"float64\", \"float32\",", "of the # non-empty element. s = tt.lscalar() data =", "rand_gpuarray(*shape, **kwargs): r = rng.rand(*shape) * 2 - 1 dtype", "= shared def test_gpusplit_opt(self): # Test that we move the", "tt.iscalar(\"i\") a_val = np.asarray(np.random.rand(4, 5), dtype=\"float32\") # The reshape is", "in [\"float32\", \"int32\", \"float16\"]: check(dtype, 3) # M != N,", "f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode) topo = f.maker.fgraph.toposort() assert len(topo)", "tt.join_ for node in f.maker.fgraph.toposort()]) == 1 assert ( sum([isinstance(node.op,", "f = theano.function([ca], host_from_gpu(ca), mode=m) assert host_from_gpu in [x.op for", "mode_with_gpu.excluding( \"cut_gpua_host_transfers\", \"local_cut_gpua_host_gpua\" ) f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode) topo", "def test_Gpujoin_inplace(): # Test Gpujoin to work inplace. # #", "self.floatX = \"float32\" self.hide_error = theano.config.mode not in [\"DebugMode\", \"DEBUG_MODE\"]", "[\"float32\", \"int32\", \"float16\"]: check(dtype, 3) # M != N, k", "theano.gpuarray.subtensor import GpuSubtensor from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor from", "= inplace_func([], node_ref.outputs, mode=mode_nogpu) f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu) except", "hasattr(Checker, \"__qualname__\"): Checker.__qualname__ = name return Checker def test_transfer_cpu_gpu(): a", "= theano.function( [m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu ) result =", "theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m) assert any(isinstance(x.op, GpuFromHost) for x in f.maker.fgraph.toposort())", "dtype = kwargs.pop(\"dtype\", theano.config.floatX) cls = kwargs.pop(\"cls\", None) if len(kwargs)", "to # ensure correctness a = tt.fmatrix(\"a\") g = GpuArrayType(dtype=\"float32\",", "= makeTester( name=\"GpuAllocTester\", # The +1 is there to allow", "in f.maker.fgraph.toposort()]) for dtype in [\"float64\", \"float32\", \"int32\", \"float16\"]: #", "f = theano.function([x], x.shape) topo = f.maker.fgraph.toposort() assert np.all(f(v) ==", "GpuJoin(view=0) c = join(0, x, z) f = theano.function([s], theano.Out(c,", "out[0].shape == (3, 2) assert out[0].dtype == \"uint64\" assert out[1].shape", "exc: err_msg = ( \"Test %s::%s: Error occurred while trying", "gpuarray.array(av, context=get_context(test_ctx_name)) av = av[:, ::2] gv = gv[:, ::2]", "5, 3, 6) check(dtype, 3, 5, -6) def test_hostfromgpu_shape_i(): #", "except one of them are empty. 
In this case #", "test_gpusplit_opt(self): # Test that we move the node to the", ") class TestGPUAlloc(TestAlloc): dtype = \"float32\" mode = mode_with_gpu shared", "+ np.array(1).astype(dtype) f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu) result", "gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1 assert np.allclose(f(a_val, b_val),", "were %s, ouputs were %s)\" ) % (self.op, testname, description,", "expected %s \" \"(dtype %s), got %s (dtype %s).\" %", "bad_shape12=(rand(7), np.int32(7), np.int32(5)), ), ) class TestGPUAlloc(TestAlloc): dtype = \"float32\"", "theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu) result = np.asarray(f(N, M, k))", "check_u(m, 1) check_u(m, -1) m = np.asarray(test_rng.rand(10, 5) * 2", "_op = op _gpu_op = gpu_op _cases = cases _skip", "from tests.tensor.test_basic import ( TestAlloc, TestComparison, TestJoinAndSplit, TestReshape, ) from", "1) check_l(m, -1) check_u(m, 0) check_u(m, 1) check_u(m, -1) m", "8), dtype=\"float32\") gv = gpuarray.array(av, context=get_context(test_ctx_name)) av = av[:, ::2]", "variables) Checker.__name__ = name if hasattr(Checker, \"__qualname__\"): Checker.__qualname__ = name", "%s\", list(kwargs.keys())[0]) return gpuarray.array(r, dtype=dtype, cls=cls, context=get_context(test_ctx_name)) def makeTester( name,", "outputs, mode=mode, allow_input_downcast=allow_input_downcast, accept_inplace=True, on_unused_input=on_unused_input, name=name, ) def fake_shared(value, name=None,", "Test that we move the node to the GPU #", "N, k > M, k > N check(dtype, 5, 3,", "= np.asarray(np.random.rand(5, 4), dtype=\"float32\") cv = gpuarray.asarray( np.random.rand(5, 4), dtype=\"float32\",", "inputs] inputs_tst = [theano.shared(inp) for inp in inputs] try: node_ref", "GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join,", "def check_u(m, k=0): m_symb = tt.matrix(dtype=m.dtype) k_symb = tt.iscalar() f", "= theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu) result = np.asarray(f(N, M,", "wrong \" \"value. 
With inputs %s, expected %s \" \"(dtype", "assert host_from_gpu in [x.op for x in f.maker.fgraph.toposort()] f =", "Gpujoin function but all except one of them are empty.", "skip = _skip checks = _checks def setup_method(self): eval(self.__class__.__module__ +", "%s::%s: Failed check: %s \" \"(inputs were %s, ouputs were", "Checker(utt.OptimizationTestMixin): op = staticmethod(_op) gpu_op = staticmethod(_gpu_op) cases = _cases", "dtype=dtype) + np.array(1).astype(dtype) f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)", "M_symb, k_symb], out, mode=mode_with_gpu) result = np.asarray(f(N, M, k)) -", "np.int32(4), np.int32(7)), correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)), bad_shape12=(rand(7), np.int32(7), np.int32(5)),", "for node in f_gpu2.maker.fgraph.toposort()]) == 2 ) assert sum([node.op ==", "inputs_ref = [theano.shared(inp) for inp in inputs] inputs_tst = [theano.shared(inp)", "Failed check: %s \" \"(inputs were %s, ouputs were %s)\"", "allow_downcast=allow_downcast, **kwargs ) except TypeError: continue def rand_gpuarray(*shape, **kwargs): r", "k=0): # Theano does not accept None as a tensor.", "GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu, ) from theano.gpuarray.elemwise import", "(3, 4, 5)) if theano.config.mode != \"FAST_COMPILE\": assert len(topo) ==", "f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu) f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu)", "len(topo) == 1 assert isinstance(topo[0].op, tt.Shape) def test_gpu_contiguous(): a =", "This is just to ensure that it works in theano", "dtype=\"float32\") gv = gpuarray.array(av, context=get_context(test_ctx_name)) f = theano.function([a], GpuFromHost(test_ctx_name)(a)) fv", "result = f(m, k) assert np.allclose(result, np.tril(m, k)) assert result.dtype", "check(dtype, 3, 5, -3) check(dtype, 5, 3, 6) check(dtype, 3,", "checks class Checker(utt.OptimizationTestMixin): op = staticmethod(_op) gpu_op = staticmethod(_gpu_op) cases", "1 ) def test_shape(): x = GpuArrayType(dtype=\"float32\", broadcastable=[False, False, False])()", "%s, expected %s)\" % (self.gpu_op, testname, type(exc), type(ref_e)) ) exc.args", "safe_make_node(self.op, *inputs_ref) node_tst = safe_make_node(self.op, *inputs_tst) except Exception as exc:", "def test_gpujoin_gpualloc(): a = tt.fmatrix(\"a\") a_val = np.asarray(np.random.rand(4, 5), dtype=\"float32\")", "np.eye(N, M_, k, dtype=dtype)) assert result.dtype == np.dtype(dtype) assert any([isinstance(node.op,", "len(f.maker.fgraph.apply_nodes) == 1 out = f() assert out.shape == (2,", "= checks class Checker(utt.OptimizationTestMixin): op = staticmethod(_op) gpu_op = staticmethod(_gpu_op)", "f() assert out.shape == (2, 3) assert out.dtype == dt", "topo]) assert f(a_val, 1).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous", "any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()]) utt.seed_rng() test_rng = np.random.RandomState(seed=utt.fetch_seed())", "TestReshape, ) from tests.tensor.utils import rand, safe_make_node from theano.gpuarray.basic_ops import", "\"uint64\" assert ( len( [ node for node in f.maker.fgraph.apply_nodes", "from theano.tensor.basic import alloc pygpu = pytest.importorskip(\"pygpu\") gpuarray = pygpu.gpuarray", "np.int32(7)), correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)), correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)),", "**kwargs): r = rng.rand(*shape) * 2 - 1 dtype =", "3, 3) check(dtype, 3, 5, 3) check(dtype, 
5, 3, -3)", "node in f_gpu2.maker.fgraph.toposort()]) == 2 ) assert sum([node.op == gpu_join", "much more comprehensive suit of tests to # ensure correctness", "i, inputs, expected, expected.dtype, variable, variable.dtype, ) ) for description,", "Checker def test_transfer_cpu_gpu(): a = tt.fmatrix(\"a\") g = GpuArrayType(dtype=\"float32\", broadcastable=(False,", "all except one of them are empty. In this case", "tt.ones_like(b)), mode=mode_with_gpu ) f_gpu2 = theano.function( [a, b], tt.join(0, tt.zeros_like(a),", "except TypeError: continue def rand_gpuarray(*shape, **kwargs): r = rng.rand(*shape) *", "sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2 assert", "M, k != 0 check(dtype, 3, 3, 1) check(dtype, 3,", "+= (err_msg,) raise self.assertFunctionContains1(f_tst, self.gpu_op) ref_e = None try: expecteds", "== tt.join_ for node in f.maker.fgraph.toposort()]) == 1 assert (", ") def test_shape(): x = GpuArrayType(dtype=\"float32\", broadcastable=[False, False, False])() v", "gpu_join for node in f_gpu.maker.fgraph.toposort()]) == 1 assert ( sum([isinstance(node.op,", "in theano # libgpuarray has a much more comprehensive suit", "assert isinstance(topo[1].op, theano.compile.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(cv)) == (5,", "try: variables = f_tst() except Exception as exc: if ref_e", "= theano.config.mode not in [\"DebugMode\", \"DEBUG_MODE\"] def shared(x, **kwargs): return", "out = f() assert out.shape == (2, 3) assert out.dtype", "theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu ) f_gpu2 =", "= tt.iscalar() k_symb = tt.iscalar() out = tt.tri(N_symb, M_symb, k_symb,", "np.int32(4), np.int32(7)), correct12=(rand(7), np.int32(4), np.int32(7)), correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)), correct23=(rand(4,", "self.ignore_topo = ( HostFromGpu, GpuFromHost, theano.compile.DeepCopyOp, GpuDimShuffle, GpuElemwise, tt.opt.Shape_i, tt.opt.MakeVector,", "it works in theano # libgpuarray has a much more", "gpu_op _cases = cases _skip = skip _checks = checks", "variable.dtype != expected.dtype or variable.shape != expected.shape or not TensorType.values_eq_approx(variable,", "not in [\"DebugMode\", \"DEBUG_MODE\"] def shared(x, **kwargs): return gpuarray_shared_constructor(x, target=test_ctx_name,", "M, k != 0 check(dtype, 5, 3, 1) check(dtype, 5,", "for node in f.maker.fgraph.toposort() ] ) o1, o2 = f()", "except Exception as exc: ref_e = exc try: variables =", "gpuarray_shared_constructor self.op = GpuReshape self.mode = mode_with_gpu self.ignore_topo = (", "f.maker.fgraph.apply_nodes if isinstance(node.op, GpuAllocEmpty) ] ) == 1 ) def", "2]) assert o[0].dtype == \"float16\" f = theano.function([], o, mode=self.mode)", "description, check in self.checks.items(): assert check(inputs, variables), ( \"Test %s::%s:", "k, dtype=dtype)) assert result.dtype == np.dtype(dtype) assert any([isinstance(node.op, GpuTri) for", ") % (self.op, testname, description, inputs, variables) Checker.__name__ = name", "%s\" ) % (self.gpu_op, testname, inputs) exc.args += (err_msg,) raise", "10) * 2 - 1, dtype=dtype) check_l(m, 0) check_l(m, 1)", "**kwargs ) except TypeError: continue def rand_gpuarray(*shape, **kwargs): r =", "host_from_gpu, ) from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise from theano.gpuarray.subtensor import", "assert np.all(f(v) == (3, 4, 5)) if theano.config.mode != \"FAST_COMPILE\":", "tt.opt.MakeVector) mode = mode_with_gpu.excluding(\"local_shape_to_shape_i\") f = 
theano.function([x], x.shape, mode=mode) topo", "in [x.op for x in f.maker.fgraph.toposort()] f = theano.function([ca], host_from_gpu(ca).shape,", "tensor_constructor for c in (gpuarray_shared_constructor, tensor_constructor, scalar_constructor): try: return c(", "\"make a Function\" ) % (self.gpu_op, testname) exc.args += (err_msg,)", "return else: err_msg = ( \"Test %s::%s: exception raised during", "in f.maker.fgraph.toposort()]) utt.seed_rng() test_rng = np.random.RandomState(seed=utt.fetch_seed()) for dtype in [\"float64\",", "False))(\"g\") av = np.asarray(rng.rand(5, 4), dtype=\"float32\") gv = gpuarray.array(av, context=get_context(test_ctx_name))", "check_u(m, 0) check_u(m, 1) check_u(m, -1) m = np.asarray(test_rng.rand(10, 10)", "GpuArrayType(dtype=\"float32\", broadcastable=(False, False))(\"g\") av = np.asarray(rng.rand(5, 4), dtype=\"float32\") gv =", "exc: err_msg = ( \"Test %s::%s: Error occurred while making", "tt.opt.MakeVector, ) assert self.op == GpuReshape class TestGPUComparison(TestComparison): def setup_method(self):", "or variable.shape != expected.shape or not TensorType.values_eq_approx(variable, expected) ) assert", "k > M, k > N check(dtype, 5, 3, 3)", "-1) # N < M, k != 0 check(dtype, 3,", "op = staticmethod(_op) gpu_op = staticmethod(_gpu_op) cases = _cases skip", "utt.seed_rng() rng = np.random.RandomState(seed=utt.fetch_seed()) def inplace_func( inputs, outputs, mode=None, allow_input_downcast=False,", "correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)), correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)), bad_shape12=(rand(7),", "of MakeVector since there is no MakeVector on GPU self.make_vector_op", "(self.gpu_op, testname, inputs) exc.args += (err_msg,) raise try: f_ref =", "a DeepCopyOp with possibly wrong results on the CPU #", "k_symb = tt.iscalar() out = tt.tri(N_symb, M_symb, k_symb, dtype=dtype) +", "= ( \"Test %s::%s: exception raised during test \" \"call", "= gpuarray_shared_constructor self.dtypes = [\"float64\", \"float32\"] class TestGPUJoinAndSplit(TestJoinAndSplit): def setup_method(self):", "None: err_msg = ( \"Test %s::%s: exception when calling the", "f.maker.fgraph.toposort() assert isinstance(topo[0].op, theano.compile.Shape_i) assert isinstance(topo[1].op, theano.compile.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector)", "= staticmethod(_op) gpu_op = staticmethod(_gpu_op) cases = _cases skip =", "[x.op for x in f.maker.fgraph.toposort()] f = theano.function([ca], host_from_gpu(ca).shape, mode=m)", "class TestGPUComparison(TestComparison): def setup_method(self): utt.seed_rng() self.mode = mode_with_gpu self.shared =", "= gpuarray.array(av, context=get_context(test_ctx_name)) mode = mode_with_gpu.excluding( \"cut_gpua_host_transfers\", \"local_cut_gpua_host_gpua\" ) f", "\"cut_gpua_host_transfers\", \"local_cut_gpua_host_gpua\" ) f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode) topo =", "# just gives a DeepCopyOp with possibly wrong results on", "2 assert sum([node.op == tt.join_ for node in f.maker.fgraph.toposort()]) ==", "f = theano.function( [m_symb, k_symb], tt.tril(m_symb, k_symb), mode=mode_with_gpu ) result", "numpy as np import pytest import theano import theano.tensor as", "assert isinstance(topo[0].op, tt.opt.Shape_i) assert isinstance(topo[1].op, tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector) assert", "err_msg = ( \"Test %s::%s: Error occurred while trying to", "tt.fmatrix(\"a\") i = tt.iscalar(\"i\") a_val = np.asarray(np.random.rand(4, 5), dtype=\"float32\") #", 
"None: checks = {} _op = op _gpu_op = gpu_op", "np.dtype(dtype) assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()]) utt.seed_rng() test_rng", "!= expected.shape or not TensorType.values_eq_approx(variable, expected) ) assert not condition,", "f(m, k) assert np.allclose(result, np.tril(m, k)) assert result.dtype == np.dtype(dtype)", "correct01=(rand(), np.int32(7)), # just gives a DeepCopyOp with possibly wrong", "class TestGPUJoinAndSplit(TestJoinAndSplit): def setup_method(self): self.mode = mode_with_gpu.excluding(\"constant_folding\") self.join_op = GpuJoin()", "Exception as exc: err_msg = ( \"Test %s::%s: Error occurred", "GpuContiguous) for node in topo]) assert f(a_val, 1).flags.c_contiguous assert f(a_val,", "def test_gpu_tril_triu(): def check_l(m, k=0): m_symb = tt.matrix(dtype=m.dtype) k_symb =", "( variable.dtype != expected.dtype or variable.shape != expected.shape or not", "Function\" ) % (self.gpu_op, testname) exc.args += (err_msg,) raise self.assertFunctionContains1(f_tst,", "== np.dtype(dtype) assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()]) utt.seed_rng()", "GpuReshape class TestGPUComparison(TestComparison): def setup_method(self): utt.seed_rng() self.mode = mode_with_gpu self.shared", "result.dtype == np.dtype(dtype) assert any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()])", "if len(kwargs) != 0: raise TypeError(\"Unexpected argument %s\", list(kwargs.keys())[0]) return", "check_u(m, 1) check_u(m, -1) m = np.asarray(test_rng.rand(10, 10) * 2", "5, -6) def test_hostfromgpu_shape_i(): # Test that the shape is", "the GPU. op=lambda *args: alloc(*args) + 1, gpu_op=GpuAlloc(test_ctx_name), cases=dict( correct01=(rand(),", "(3, 2) assert out[1].dtype == \"uint64\" assert ( len( [", "def check(dtype, N, M_=None, k=0): # Theano does not accept", "%s::%s: Output %s gave the wrong \" \"value. 
With inputs", "r = rng.rand(*shape) * 2 - 1 dtype = kwargs.pop(\"dtype\",", "N_symb = tt.iscalar() M_symb = tt.iscalar() k_symb = tt.iscalar() out", "\" \"(inputs were %s, ouputs were %s)\" ) % (self.op,", "in f_gpu.maker.fgraph.toposort()]) == 1 assert ( sum([isinstance(node.op, GpuAlloc) for node", "for x in f.maker.fgraph.toposort()) f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m) topo", "in inputs] try: node_ref = safe_make_node(self.op, *inputs_ref) node_tst = safe_make_node(self.op,", "f(av) assert GpuArrayType.values_eq(fv, gv) f = theano.function([g], host_from_gpu(g)) fv =", "assert len(topo) == 1 assert isinstance(topo[0].op, GpuToGpu) fv = f(gv)", "\"float32\", \"int32\", \"float16\"]: # try a big one check(dtype, 1000,", "we make the subtensor on the CPU # to transfer", "theano.function([g], host_from_gpu(g)) fv = f(gv) assert np.all(fv == av) def", "\"Test %s::%s: Error occurred while making \" \"a node with", "raise self.assertFunctionContains1(f_tst, self.gpu_op) ref_e = None try: expecteds = f_ref()", "GpuToGpu(test_ctx_name)(g), mode=mode) topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert", "in f_gpu.maker.fgraph.toposort()]) == 2 ) assert sum([node.op == gpu_join for", "GpuAllocEmpty(\"uint64\", test_ctx_name)(3, 2), ], ) out = f() assert out[0].shape", "# Use join instead of MakeVector since there is no", "== np.dtype(dtype) assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()]) def", "\"float16\"]: # try a big one check(dtype, 1000, 1000, 0)", "to the GPU # Also test float16 computation at the", "\"Test %s::%s: Error occurred while trying to \" \"make a", "== 1 assert ( sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()])", "if M is None: M = N N_symb = tt.iscalar()", "def test_shape(): x = GpuArrayType(dtype=\"float32\", broadcastable=[False, False, False])() v =", "np.int32(7)), bad_shape12=(rand(7), np.int32(7), np.int32(5)), ), ) class TestGPUAlloc(TestAlloc): dtype =", "k = 0 check(dtype, 3, 5) check(dtype, 5, 3) #", "av) def gpu_alloc_expected(x, *shp): g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name)) g[:]", "= ( \"Test %s::%s: Error occurred while trying to \"", "isinstance(topo[0].op, theano.compile.Shape_i) assert isinstance(topo[1].op, theano.compile.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(cv))", "5), dtype=\"float32\") # The reshape is needed otherwise we make", "theano.config.mode != \"FAST_COMPILE\": assert len(topo) == 4 assert isinstance(topo[0].op, tt.opt.Shape_i)", ") for description, check in self.checks.items(): assert check(inputs, variables), (", "( sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()]) == 2 )", "description, inputs, variables) Checker.__name__ = name if hasattr(Checker, \"__qualname__\"): Checker.__qualname__", "f_gpu2.maker.fgraph.toposort()]) == 2 ) assert sum([node.op == gpu_join for node", "theano.tensor.basic import alloc pygpu = pytest.importorskip(\"pygpu\") gpuarray = pygpu.gpuarray utt.seed_rng()", "assert out[0].shape == (3, 2) assert out[0].dtype == \"uint64\" assert", "assert f(a_val, 2).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous class TestGPUReshape(TestReshape): def setup_method(self):", "= gpuarray.array(av, context=get_context(test_ctx_name)) f = theano.function([a], GpuFromHost(test_ctx_name)(a)) fv = f(av)", "1000, 1000, -400) check(dtype, 1000, 1000, 400) check(dtype, 5) #", "av[:, ::2] gv = gv[:, ::2] f = theano.function([a], 
GpuFromHost(test_ctx_name)(a))", "1000, -400) check(dtype, 1000, 1000, 400) check(dtype, 5) # M", "part of the file from tests import unittest_tools as utt", "on GPU self.make_vector_op = GpuJoin() # this is to avoid", "assert np.allclose(result, np.triu(m, k)) assert result.dtype == np.dtype(dtype) assert any([isinstance(node.op,", "test_gpu_tril_triu(): def check_l(m, k=0): m_symb = tt.matrix(dtype=m.dtype) k_symb = tt.iscalar()", "that the shape is lifted over hostfromgpu m = mode_with_gpu.including(", "exception of the same type we're good. if isinstance(exc, type(ref_e)):", "does not support None as inputs even if this is", "mode_with_gpu.excluding(\"constant_folding\") self.join_op = GpuJoin() self.split_op_class = GpuSplit # Use join", "import test classes otherwise they get tested as part of", "def inplace_func( inputs, outputs, mode=None, allow_input_downcast=False, on_unused_input=\"raise\", name=None, ): if", "i = tt.iscalar(\"i\") a_val = np.asarray(np.random.rand(4, 5), dtype=\"float32\") # The", "f.maker.fgraph.toposort()]) for dtype in [\"float32\", \"int32\", \"float16\"]: check(dtype, 3) #", "for _ in range(len(inputs)): if type(inputs[_]) is float: inputs[_] =", "any([isinstance(node.op, GpuSubtensor) for node in topo]) assert any([isinstance(node.op, GpuContiguous) for", "np.int32(7)), correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)), bad_shape12=(rand(7), np.int32(7), np.int32(5)), ),", "( \"Test %s::%s: exception raised during test \" \"call was", "broadcastable=(False, False), context_name=test_ctx_name )() av = np.asarray(rng.rand(5, 4), dtype=\"float32\") gv", "classes otherwise they get tested as part of the file", "to ensure that it works in theano # libgpuarray has", "\"float32\" self.hide_error = theano.config.mode not in [\"DebugMode\", \"DEBUG_MODE\"] def shared(x,", "MakeVector on GPU self.make_vector_op = GpuJoin() # this is to", "import pytest import theano import theano.tensor as tt # Don't", "this is # allowed. if M is None: M =", "got %s (dtype %s).\" % ( self.op, testname, i, inputs,", "= GpuArrayType(dtype=\"float32\", broadcastable=(False, False))(\"g\") av = np.asarray(rng.rand(5, 8), dtype=\"float32\") gv", "test_Gpujoin_inplace(): # Test Gpujoin to work inplace. # # This", "assert any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()]) for dtype in", "= theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu", "isinstance(topo[0].op, tt.opt.Shape_i) assert isinstance(topo[1].op, tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.Shape_i) assert isinstance(topo[3].op,", "tt.iscalar() k_symb = tt.iscalar() out = tt.tri(N_symb, M_symb, k_symb, dtype=dtype)", "import ( GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit,", "4, 5], dtype=theano.config.floatX) x = gpuarray_shared_constructor(data, borrow=True) z = tt.zeros((s,))", "%s).\" % ( self.op, testname, i, inputs, expected, expected.dtype, variable,", "5, 3) # N == M, k != 0 check(dtype,", "= ( \"Test %s::%s: exception when calling the \" \"Function\"", "is there to allow the lift to the GPU. 
op=lambda", "mode=None, allow_input_downcast=False, on_unused_input=\"raise\", name=None, ): if mode is None: mode", "tt.fmatrix(\"a\") g = GpuArrayType(dtype=\"float32\", broadcastable=(False, False))(\"g\") av = np.asarray(rng.rand(5, 4),", "m.get_value(borrow=True)[2:]) def test_gpujoin_gpualloc(): a = tt.fmatrix(\"a\") a_val = np.asarray(np.random.rand(4, 5),", "1 dtype = kwargs.pop(\"dtype\", theano.config.floatX) cls = kwargs.pop(\"cls\", None) if", "gpu_op = staticmethod(_gpu_op) cases = _cases skip = _skip checks", "= av[:, ::2] gv = gv[:, ::2] f = theano.function([a],", "dtype=dtype) check_l(m, 0) check_l(m, 1) check_l(m, -1) check_u(m, 0) check_u(m,", "class Checker(utt.OptimizationTestMixin): op = staticmethod(_op) gpu_op = staticmethod(_gpu_op) cases =", "GpuArrayType(dtype=\"float32\", broadcastable=[False, False, False])() v = gpuarray.zeros((3, 4, 5), dtype=\"float32\",", "if isinstance(node.op, GpuAllocEmpty) ] ) == 1 ) def test_shape():", "out = tt.eye(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype) f =", "result.dtype == np.dtype(dtype) assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])", "5) * 2 - 1, dtype=dtype) check_l(m, 0) check_l(m, 1)", "self.assertFunctionContains1(f_tst, self.gpu_op) ref_e = None try: expecteds = f_ref() except", "theano.function([], o, mode=self.mode) assert any( [ isinstance(node.op, self.split_op_class) for node", "M_=None, k=0): # Theano does not accept None as a", "= _cases skip = _skip checks = _checks def setup_method(self):", "assert isinstance(topo[3].op, tt.opt.MakeVector) mode = mode_with_gpu.excluding(\"local_shape_to_shape_i\") f = theano.function([x], x.shape,", "+= (err_msg,) raise try: f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu) f_tst", "tt.opt.Shape_i, tt.opt.MakeVector, ) assert self.op == GpuReshape class TestGPUComparison(TestComparison): def", "any(isinstance(x.op, GpuFromHost) for x in f.maker.fgraph.toposort()) f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape,", "self.join_op = GpuJoin() self.split_op_class = GpuSplit # Use join instead", "k > M, -k > N, k > M, k", "== 1 ) def test_shape(): x = GpuArrayType(dtype=\"float32\", broadcastable=[False, False,", "= theano.function([s], theano.Out(c, borrow=True)) if not isinstance(mode_with_gpu, theano.compile.DebugMode): assert x.get_value(borrow=True,", "\".\" + self.__class__.__name__) def test_all(self): if skip: pytest.skip(skip) for testname,", "mode_with_gpu shared = staticmethod(gpuarray_shared_constructor) allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()] def", "4, 5]) def test_gpu_tril_triu(): def check_l(m, k=0): m_symb = tt.matrix(dtype=m.dtype)", "tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu ) f_gpu2 = theano.function( [a, b],", "while trying to \" \"make a Function\" ) % (self.gpu_op,", "None) if len(kwargs) != 0: raise TypeError(\"Unexpected argument %s\", list(kwargs.keys())[0])", "k_symb), mode=mode_with_gpu ) result = f(m, k) assert np.allclose(result, np.triu(m,", "# k > M, -k > N, k > M,", "f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu) except Exception as exc: err_msg", "2).flags.c_contiguous class TestGPUReshape(TestReshape): def setup_method(self): self.shared = gpuarray_shared_constructor self.op =", "[\"float32\", \"int8\"]: f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3)) assert len(f.maker.fgraph.apply_nodes)", "GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu, ) from 
def makeTester(
    name,
    op,
    gpu_op,
    cases,
    checks=None,
    mode_gpu=mode_with_gpu,
    mode_nogpu=mode_without_gpu,
    skip=False,
    eps=1e-10,
):
    if checks is None:
        checks = {}

    _op = op
    _gpu_op = gpu_op
    _cases = cases
    _skip = skip
    _checks = checks

    class Checker(utt.OptimizationTestMixin):
        op = staticmethod(_op)
        gpu_op = staticmethod(_gpu_op)
        cases = _cases
        skip = _skip
        checks = _checks

        def setup_method(self):
            eval(self.__class__.__module__ + "." + self.__class__.__name__)

        def test_all(self):
            if skip:
                pytest.skip(skip)

            for testname, inputs in cases.items():
                # Plain Python floats are promoted to floatX arrays; other
                # inputs (ndarrays, numpy scalars) pass through unchanged.
                for _ in range(len(inputs)):
                    if type(inputs[_]) is float:
                        inputs[_] = np.asarray(inputs[_], dtype=theano.config.floatX)
                self.run_case(testname, inputs)

        def run_case(self, testname, inputs):
            inputs_ref = [theano.shared(inp) for inp in inputs]
            inputs_tst = [theano.shared(inp) for inp in inputs]

            try:
                node_ref = safe_make_node(self.op, *inputs_ref)
                node_tst = safe_make_node(self.op, *inputs_tst)
            except Exception as exc:
                err_msg = (
                    "Test %s::%s: Error occurred while making " "a node with inputs %s"
                ) % (self.gpu_op, testname, inputs)
                exc.args += (err_msg,)
                raise

            try:
                f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu)
                f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu)
            except Exception as exc:
                err_msg = (
                    "Test %s::%s: Error occurred while trying to " "make a Function"
                ) % (self.gpu_op, testname)
                exc.args += (err_msg,)
                raise

            self.assertFunctionContains1(f_tst, self.gpu_op)

            ref_e = None
            try:
                expecteds = f_ref()
            except Exception as exc:
                ref_e = exc

            try:
                variables = f_tst()
            except Exception as exc:
                if ref_e is None:
                    err_msg = (
                        "Test %s::%s: exception when calling the " "Function"
                    ) % (self.gpu_op, testname)
                    exc.args += (err_msg,)
                    raise
                else:
                    # if we raised an exception of the same type we're good.
                    if isinstance(exc, type(ref_e)):
                        return
                    else:
                        err_msg = (
                            "Test %s::%s: exception raised during test "
                            "call was not the same as the reference "
                            "call (got: %s, expected %s)"
                            % (self.gpu_op, testname, type(exc), type(ref_e))
                        )
                        exc.args += (err_msg,)
                        raise

            for i, (variable, expected) in enumerate(zip(variables, expecteds)):
                condition = (
                    variable.dtype != expected.dtype
                    or variable.shape != expected.shape
                    or not TensorType.values_eq_approx(variable, expected)
                )
                assert not condition, (
                    "Test %s::%s: Output %s gave the wrong "
                    "value. With inputs %s, expected %s "
                    "(dtype %s), got %s (dtype %s)."
                    % (
                        self.op,
                        testname,
                        i,
                        inputs,
                        expected,
                        expected.dtype,
                        variable,
                        variable.dtype,
                    )
                )

            for description, check in self.checks.items():
                assert check(inputs, variables), (
                    "Test %s::%s: Failed check: %s " "(inputs were %s, outputs were %s)"
                ) % (self.op, testname, description, inputs, variables)

    Checker.__name__ = name
    return Checker
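
# How `makeTester` is meant to be used (my reading, for illustration only;
# `TestGpuAlloc` below is the real instance in this file): `op` builds the
# reference graph, `gpu_op` is the op the optimizer must introduce on the
# GPU, and each entry of `cases` is a tuple of inputs. A hypothetical tester
# could look like the commented sketch here; the names in it are invented.
# TestGpuExampleTester = makeTester(
#     name="GpuExampleTester",
#     op=lambda x: x + 1,
#     gpu_op=GpuElemwise,
#     cases=dict(correct_2d=(rand(3, 4),)),
# )
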
def test_transfer_cpu_gpu():
    a = tt.fmatrix("a")
    g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")

    av = np.asarray(rng.rand(5, 4), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))

    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    fv = f(av)
    assert GpuArrayType.values_eq(fv, gv)

    f = theano.function([g], host_from_gpu(g))
    fv = f(gv)
    assert np.all(fv == av)


def test_transfer_gpu_gpu():
    g = GpuArrayType(
        dtype="float32", broadcastable=(False, False), context_name=test_ctx_name
    )()

    av = np.asarray(rng.rand(5, 4), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))
    mode = mode_with_gpu.excluding("cut_gpua_host_transfers", "local_cut_gpua_host_gpua")
    f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, GpuToGpu)
    fv = f(gv)
    assert GpuArrayType.values_eq(fv, gv)


def test_transfer_strided():
    # This is just to ensure that it works in theano.
    # libgpuarray has a much more comprehensive suite of tests to
    # ensure correctness.
    a = tt.fmatrix("a")
    g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")

    av = np.asarray(rng.rand(5, 8), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))

    av = av[:, ::2]
    gv = gv[:, ::2]

    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    fv = f(av)
    assert GpuArrayType.values_eq(fv, gv)

    f = theano.function([g], host_from_gpu(g))
    fv = f(gv)
    assert np.all(fv == av)


def gpu_alloc_expected(x, *shp):
    g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name))
    g[:] = x
    return g
TestGpuAlloc = makeTester(
    name="GpuAllocTester",
    # The +1 is there to allow the lift to the GPU.
    op=lambda *args: alloc(*args) + 1,
    gpu_op=GpuAlloc(test_ctx_name),
    cases=dict(
        correct01=(rand(), np.int32(7)),
        # just gives a DeepCopyOp with possibly wrong results on the CPU
        # correct01_bcast=(rand(1), np.int32(7)),
        correct02=(rand(), np.int32(4), np.int32(7)),
        correct12=(rand(7), np.int32(4), np.int32(7)),
        correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)),
        correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)),
        bad_shape12=(rand(7), np.int32(7), np.int32(5)),
    ),
)


class TestGPUAlloc(TestAlloc):
    dtype = "float32"
    mode = mode_with_gpu
    shared = staticmethod(gpuarray_shared_constructor)
    allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()]


def test_alloc_empty():
    for dt in ["float32", "int8"]:
        f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3))
        assert len(f.maker.fgraph.apply_nodes) == 1
        out = f()
        assert out.shape == (2, 3)
        assert out.dtype == dt

    f = theano.function(
        [],
        [
            GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
            GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
        ],
    )
    out = f()
    assert out[0].shape == (3, 2)
    assert out[0].dtype == "uint64"
    assert out[1].shape == (3, 2)
    assert out[1].dtype == "uint64"
    # The two identical AllocEmpty nodes should be merged into one.
    assert (
        len(
            [
                node
                for node in f.maker.fgraph.apply_nodes
                if isinstance(node.op, GpuAllocEmpty)
            ]
        )
        == 1
    )
"there to allow the lift to the GPU. op=lambda *args:", "f = theano.function([s], theano.Out(c, borrow=True)) if not isinstance(mode_with_gpu, theano.compile.DebugMode): assert", "theano.function( [m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu ) result = f(m,", "inp in inputs] try: node_ref = safe_make_node(self.op, *inputs_ref) node_tst =", "o[0].dtype == \"float16\" f = theano.function([], o, mode=self.mode) assert any(", "k_symb = tt.iscalar() f = theano.function( [m_symb, k_symb], tt.tril(m_symb, k_symb),", "tt.lscalar() data = np.array([3, 4, 5], dtype=theano.config.floatX) x = gpuarray_shared_constructor(data,", "= theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3)) assert len(f.maker.fgraph.apply_nodes) == 1 out", "for node in topo]) assert f(a_val, 1).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous", "check(dtype, 5, 3, -3) check(dtype, 3, 5, -3) check(dtype, 5,", "= tt.tri(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype) f = theano.function([N_symb,", "5), dtype=\"float32\") b = tt.fmatrix(\"b\") b_val = np.asarray(np.random.rand(3, 5), dtype=\"float32\")", "assert out[1].dtype == \"uint64\" assert ( len( [ node for", "= tt.iscalar() f = theano.function( [m_symb, k_symb], tt.tril(m_symb, k_symb), mode=mode_with_gpu", "try a big one check(dtype, 1000, 1000, 0) check(dtype, 1000,", "op _gpu_op = gpu_op _cases = cases _skip = skip", "a Function\" ) % (self.gpu_op, testname) exc.args += (err_msg,) raise", "5) # M != N, k = 0 check(dtype, 3,", "M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype) f = theano.function([N_symb, M_symb, k_symb],", "= tt.iscalar() f = theano.function( [m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu", "but all except one of them are empty. In this", "k_symb = tt.iscalar() f = theano.function( [m_symb, k_symb], tt.triu(m_symb, k_symb),", "-1) m = np.asarray(test_rng.rand(10, 10) * 2 - 1, dtype=dtype)", "> N check(dtype, 5, 3, 3) check(dtype, 3, 5, 3)", "ca = theano.gpuarray.type.GpuArrayType(\"float32\", (False, False))() av = np.asarray(np.random.rand(5, 4), dtype=\"float32\")", "a_val = np.asarray(np.random.rand(4, 5), dtype=\"float32\") b = tt.fmatrix(\"b\") b_val =", "one of them are empty. In this case # Gpujoin", "% (self.gpu_op, testname, inputs) exc.args += (err_msg,) raise try: f_ref", "is None: mode = mode_with_gpu return theano.function( inputs, outputs, mode=mode,", "TypeError(\"Unexpected argument %s\", list(kwargs.keys())[0]) return gpuarray.array(r, dtype=dtype, cls=cls, context=get_context(test_ctx_name)) def", "[\"float64\", \"float32\", \"float16\"]: # try a big one m =", "eval(self.__class__.__module__ + \".\" + self.__class__.__name__) def test_all(self): if skip: pytest.skip(skip)", "makeTester( name=\"GpuAllocTester\", # The +1 is there to allow the", "out.dtype == dt f = theano.function( [], [ GpuAllocEmpty(\"uint64\", test_ctx_name)(3,", "move the node to the GPU # Also test float16", "accept None as a tensor. 
# So we must use", "GpuTri) for node in f.maker.fgraph.toposort()]) def check_u(m, k=0): m_symb =", "def test_hostfromgpu_shape_i(): # Test that the shape is lifted over", "] ) == 1 ) def test_shape(): x = GpuArrayType(dtype=\"float32\",", "tensor_constructor, scalar_constructor): try: return c( value, name=name, strict=strict, allow_downcast=allow_downcast, **kwargs", "[3, 4, 5]) def test_gpu_tril_triu(): def check_l(m, k=0): m_symb =", "def test_gpu_contiguous(): a = tt.fmatrix(\"a\") i = tt.iscalar(\"i\") a_val =", "% (self.gpu_op, testname) exc.args += (err_msg,) raise self.assertFunctionContains1(f_tst, self.gpu_op) ref_e", "a real value. M = M_ # Currently DebugMode does", "2 - 1, dtype=dtype) check_l(m, 0) check_l(m, 1) check_l(m, -1)", "from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor from theano.tensor import TensorType", "f = theano.function([ca], host_from_gpu(ca).shape, mode=m) topo = f.maker.fgraph.toposort() assert isinstance(topo[0].op,", "f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu) result = np.asarray(f(N,", "\"float16\" f = theano.function([], o, mode=self.mode) assert any( [ isinstance(node.op,", "node_tst = safe_make_node(self.op, *inputs_tst) except Exception as exc: err_msg =", "pygpu.gpuarray utt.seed_rng() rng = np.random.RandomState(seed=utt.fetch_seed()) def inplace_func( inputs, outputs, mode=None,", "same as the reference \" \"call (got: %s, expected %s)\"", "assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()]) utt.seed_rng() test_rng =", "= mode_with_gpu return theano.function( inputs, outputs, mode=mode, allow_input_downcast=allow_input_downcast, accept_inplace=True, on_unused_input=on_unused_input,", "scalar_constructor): try: return c( value, name=name, strict=strict, allow_downcast=allow_downcast, **kwargs )", "< M, k != 0 check(dtype, 3, 5, 1) check(dtype,", "more comprehensive suit of tests to # ensure correctness a", "tt.opt.MakeVector) assert tuple(f(av)) == (5, 4) f = theano.function([ca], host_from_gpu(ca),", "any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()]) for dtype in [\"float32\",", "# try a big one m = np.asarray(test_rng.rand(5000, 5000) *", "lift to the GPU. op=lambda *args: alloc(*args) + 1, gpu_op=GpuAlloc(test_ctx_name),", "check(dtype, 5, 3, 3) check(dtype, 3, 5, 3) check(dtype, 5,", "check_l(m, 1) check_l(m, -1) check_u(m, 0) check_u(m, 1) check_u(m, -1)", "False))(\"g\") av = np.asarray(rng.rand(5, 8), dtype=\"float32\") gv = gpuarray.array(av, context=get_context(test_ctx_name))", "work inplace and the output should be the view of", "assert isinstance(topo[2].op, tt.opt.Shape_i) assert isinstance(topo[3].op, tt.opt.MakeVector) mode = mode_with_gpu.excluding(\"local_shape_to_shape_i\") f", "name=name, ) def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs): from theano.tensor.sharedvar", "we must use a real value. 
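
# Generic form of the graph-inspection idiom used throughout this file (my
# addition for illustration; the tests above and below inline it): scan the
# optimized graph of a compiled function for a node of a given op class.
def _graph_has_op(fn, op_class):
    # fn.maker.fgraph is the optimized graph behind a compiled function.
    return any(isinstance(node.op, op_class) for node in fn.maker.fgraph.toposort())
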
class TestGPUReshape(TestReshape):
    def setup_method(self):
        self.shared = gpuarray_shared_constructor
        self.op = GpuReshape
        self.mode = mode_with_gpu
        self.ignore_topo = (
            HostFromGpu,
            GpuFromHost,
            theano.compile.DeepCopyOp,
            GpuDimShuffle,
            GpuElemwise,
            tt.opt.Shape_i,
            tt.opt.MakeVector,
        )
        assert self.op == GpuReshape


class TestGPUComparison(TestComparison):
    def setup_method(self):
        utt.seed_rng()
        self.mode = mode_with_gpu
        self.shared = gpuarray_shared_constructor
        self.dtypes = ["float64", "float32"]
class TestGPUJoinAndSplit(TestJoinAndSplit):
    def setup_method(self):
        self.mode = mode_with_gpu.excluding("constant_folding")
        self.join_op = GpuJoin()
        self.split_op_class = GpuSplit
        # Use join instead of MakeVector since there is no MakeVector on GPU
        self.make_vector_op = GpuJoin()
        # this is to avoid errors with limited devices
        self.floatX = "float32"
        self.hide_error = theano.config.mode not in ["DebugMode", "DEBUG_MODE"]

        def shared(x, **kwargs):
            return gpuarray_shared_constructor(x, target=test_ctx_name, **kwargs)

        self.shared = shared

    def test_gpusplit_opt(self):
        # Test that we move the node to the GPU
        # Also test float16 computation at the same time.
        rng = np.random.RandomState(seed=utt.fetch_seed())
        m = self.shared(rng.rand(4, 6).astype("float16"))
        o = tt.Split(2)(m, 0, [2, 2])
        assert o[0].dtype == "float16"
        f = theano.function([], o, mode=self.mode)
        assert any(
            [
                isinstance(node.op, self.split_op_class)
                for node in f.maker.fgraph.toposort()
            ]
        )
        o1, o2 = f()
        assert np.allclose(o1, m.get_value(borrow=True)[:2])
        assert np.allclose(o2, m.get_value(borrow=True)[2:])
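
# Note on the mode above (my addition): "constant_folding" is likely excluded
# so that joins/splits of constant inputs are not evaluated at compile time,
# which would leave no GpuJoin/GpuSplit node in the graph for the base
# class's assertions to find.
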
def test_gpujoin_gpualloc():
    a = tt.fmatrix("a")
    a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
    b = tt.fmatrix("b")
    b_val = np.asarray(np.random.rand(3, 5), dtype="float32")

    f = theano.function(
        [a, b],
        tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4,
        mode=mode_without_gpu,
    )
    f_gpu = theano.function(
        [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu
    )
    f_gpu2 = theano.function(
        [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu
    )
    assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == tt.join_ for node in f.maker.fgraph.toposort()]) == 1
    assert (
        sum([isinstance(node.op, GpuAlloc) for node in f_gpu.maker.fgraph.toposort()])
        == 2
    )
    assert sum([node.op == gpu_join for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert (
        sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()])
        == 2
    )
    assert sum([node.op == gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
def test_gpueye():
    def check(dtype, N, M_=None, k=0):
        # Theano does not accept None as a tensor.
        # So we must use a real value.
        M = M_
        # Currently DebugMode does not support None as inputs even if this is
        # allowed.
        if M is None:
            M = N

        N_symb = tt.iscalar()
        M_symb = tt.iscalar()
        k_symb = tt.iscalar()

        # The +1 forces the computation onto the GPU; it is subtracted back
        # before comparing against the NumPy reference.
        out = tt.eye(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
        f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)

        result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
        assert np.allclose(result, np.eye(N, M_, k, dtype=dtype))
        assert result.dtype == np.dtype(dtype)
        assert any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()])

    for dtype in ["float32", "int32", "float16"]:
        check(dtype, 3)
        # M != N, k = 0
        check(dtype, 3, 5)
        check(dtype, 5, 3)
        # N == M, k != 0
        check(dtype, 3, 3, 1)
        check(dtype, 3, 3, -1)
        # N < M, k != 0
        check(dtype, 3, 5, 1)
        check(dtype, 3, 5, -1)
        # N > M, k != 0
        check(dtype, 5, 3, 1)
        check(dtype, 5, 3, -1)
        # k > M, -k > N, k > M, k > N
        check(dtype, 5, 3, 3)
        check(dtype, 3, 5, 3)
        check(dtype, 5, 3, -3)
        check(dtype, 3, 5, -3)
        check(dtype, 5, 3, 6)
        check(dtype, 3, 5, -6)
def test_hostfromgpu_shape_i():
    # Test that the shape is lifted over hostfromgpu

    m = mode_with_gpu.including(
        "local_dot_to_dot22", "local_dot22_to_dot22scalar", "specialize"
    )
    a = tt.fmatrix("a")
    ca = theano.gpuarray.type.GpuArrayType("float32", (False, False))()
    av = np.asarray(np.random.rand(5, 4), dtype="float32")
    cv = gpuarray.asarray(
        np.random.rand(5, 4), dtype="float32", context=get_context(test_ctx_name)
    )

    f = theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m)
    assert any(isinstance(x.op, GpuFromHost) for x in f.maker.fgraph.toposort())
    f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m)
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[0].op, tt.opt.Shape_i)
    assert isinstance(topo[1].op, tt.opt.Shape_i)
    assert isinstance(topo[2].op, tt.opt.MakeVector)
    assert tuple(f(av)) == (5, 4)

    f = theano.function([ca], host_from_gpu(ca), mode=m)
    assert host_from_gpu in [x.op for x in f.maker.fgraph.toposort()]
    f = theano.function([ca], host_from_gpu(ca).shape, mode=m)
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[0].op, theano.compile.Shape_i)
    assert isinstance(topo[1].op, theano.compile.Shape_i)
    assert isinstance(topo[2].op, tt.opt.MakeVector)
    assert tuple(f(cv)) == (5, 4)


def test_Gpujoin_inplace():
    # Test Gpujoin to work inplace.
    #
    # This function tests the case when several elements are passed to the
    # Gpujoin function but all except one of them are empty. In this case
    # Gpujoin should work inplace and the output should be the view of the
    # non-empty element.
    s = tt.lscalar()
    data = np.array([3, 4, 5], dtype=theano.config.floatX)
    x = gpuarray_shared_constructor(data, borrow=True)
    z = tt.zeros((s,))

    join = GpuJoin(view=0)
    c = join(0, x, z)
    f = theano.function([s], theano.Out(c, borrow=True))
    if not isinstance(mode_with_gpu, theano.compile.DebugMode):
        assert x.get_value(borrow=True, return_internal_type=True) is f(0)
    assert np.allclose(f(0), [3, 4, 5])
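
# Why the identity check in test_Gpujoin_inplace holds (descriptive note, my
# addition): GpuJoin(view=0) declares that the output may be a view of input
# 0, and theano.Out(c, borrow=True) lets the compiled function return that
# view without copying, so joining with an empty array hands back the shared
# storage itself.
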
if isinstance(exc, type(ref_e)): return else:", "o, mode=self.mode) assert any( [ isinstance(node.op, self.split_op_class) for node in", "assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2", "isinstance(topo[0].op, tt.opt.Shape_i) assert isinstance(topo[1].op, tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(av))", "class TestGPUAlloc(TestAlloc): dtype = \"float32\" mode = mode_with_gpu shared =", "import TensorType from theano.tensor.basic import alloc pygpu = pytest.importorskip(\"pygpu\") gpuarray", "mode_with_gpu, mode_without_gpu, test_ctx_name from tests.tensor.test_basic import ( TestAlloc, TestComparison, TestJoinAndSplit,", "f.maker.fgraph.toposort() assert any([isinstance(node.op, GpuSubtensor) for node in topo]) assert any([isinstance(node.op,", "accept_inplace=True, on_unused_input=on_unused_input, name=name, ) def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs):", "get_context, gpuarray_shared_constructor from theano.tensor import TensorType from theano.tensor.basic import alloc", "is None: err_msg = ( \"Test %s::%s: exception when calling", "i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu ) topo = f.maker.fgraph.toposort() assert any([isinstance(node.op,", "dtype=\"float32\") f = theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) +", "lifted over hostfromgpu m = mode_with_gpu.including( \"local_dot_to_dot22\", \"local_dot22_to_dot22scalar\", \"specialize\" )", "check_u(m, k=0): m_symb = tt.matrix(dtype=m.dtype) k_symb = tt.iscalar() f =", "+ 1, gpu_op=GpuAlloc(test_ctx_name), cases=dict( correct01=(rand(), np.int32(7)), # just gives a", "assert np.allclose(result, np.eye(N, M_, k, dtype=dtype)) assert result.dtype == np.dtype(dtype)", "%s::%s: Error occurred while making \" \"a node with inputs", "3, 6) check(dtype, 3, 5, -6) def test_hostfromgpu_shape_i(): # Test", "TestComparison, TestJoinAndSplit, TestReshape, ) from tests.tensor.utils import rand, safe_make_node from", "from theano.gpuarray.subtensor import GpuSubtensor from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor", "np.allclose(f(0), [3, 4, 5]) def test_gpu_tril_triu(): def check_l(m, k=0): m_symb", "gpu_alloc_expected(x, *shp): g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name)) g[:] = x", "\" \"make a Function\" ) % (self.gpu_op, testname) exc.args +=", "\"int32\", \"float16\"]: check(dtype, 3) # M != N, k =", "gives a DeepCopyOp with possibly wrong results on the CPU", "GpuAlloc) for node in f_gpu.maker.fgraph.toposort()]) == 2 ) assert sum([node.op", "m.get_value(borrow=True)[:2]) assert np.allclose(o2, m.get_value(borrow=True)[2:]) def test_gpujoin_gpualloc(): a = tt.fmatrix(\"a\") a_val", "_cases = cases _skip = skip _checks = checks class", "5, 3, 1) check(dtype, 5, 3, -1) # k >", "test_all(self): if skip: pytest.skip(skip) for testname, inputs in cases.items(): for", "from theano.tensor import TensorType from theano.tensor.basic import alloc pygpu =", "theano.function( [a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu ) topo = f.maker.fgraph.toposort()", "3) # N == M, k != 0 check(dtype, 3,", "self.__class__.__name__) def test_all(self): if skip: pytest.skip(skip) for testname, inputs in", "[a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu ) topo = f.maker.fgraph.toposort() assert", "GpuAlloc(test_ctx_name), tt.Alloc()] def test_alloc_empty(): for dt in [\"float32\", \"int8\"]: f", "assert 
out[1].shape == (3, 2) assert out[1].dtype == \"uint64\" assert", "None: mode = mode_with_gpu return theano.function( inputs, outputs, mode=mode, allow_input_downcast=allow_input_downcast,", "exc.args += (err_msg,) raise self.assertFunctionContains1(f_tst, self.gpu_op) ref_e = None try:", "one m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype)", "check(dtype, N, M_=None, k=0): # Theano does not accept None", "just gives a DeepCopyOp with possibly wrong results on the", "is None: M = N N_symb = tt.iscalar() M_symb =", "1) check(dtype, 3, 5, -1) # N > M, k", "as inputs even if this is # allowed. if M", "shape is lifted over hostfromgpu m = mode_with_gpu.including( \"local_dot_to_dot22\", \"local_dot22_to_dot22scalar\",", "= gv[:, ::2] f = theano.function([a], GpuFromHost(test_ctx_name)(a)) fv = f(av)", "o1, o2 = f() assert np.allclose(o1, m.get_value(borrow=True)[:2]) assert np.allclose(o2, m.get_value(borrow=True)[2:])", "5), dtype=\"float32\", context=get_context(test_ctx_name)) f = theano.function([x], x.shape) topo = f.maker.fgraph.toposort()", "for node in f.maker.fgraph.toposort()]) for dtype in [\"float32\", \"int32\", \"float16\"]:", "7), np.int32(2), np.int32(4), np.int32(7)), bad_shape12=(rand(7), np.int32(7), np.int32(5)), ), ) class", "cases _skip = skip _checks = checks class Checker(utt.OptimizationTestMixin): op", "theano.tensor.sharedvar import scalar_constructor, tensor_constructor for c in (gpuarray_shared_constructor, tensor_constructor, scalar_constructor):", "staticmethod(_gpu_op) cases = _cases skip = _skip checks = _checks", "inplace_func( inputs, outputs, mode=None, allow_input_downcast=False, on_unused_input=\"raise\", name=None, ): if mode", "of the file from tests import unittest_tools as utt from", "# The reshape is needed otherwise we make the subtensor", "inputs %s, expected %s \" \"(dtype %s), got %s (dtype", "for dt in [\"float32\", \"int8\"]: f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2,", "in f.maker.fgraph.toposort()]) == 2 assert sum([node.op == tt.join_ for node", "4, mode=mode_with_gpu ) assert sum([node.op == tt.alloc for node in", "theano.gpuarray.basic_ops import ( GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape,", "testname, inputs): inputs_ref = [theano.shared(inp) for inp in inputs] inputs_tst", "assert np.allclose(o1, m.get_value(borrow=True)[:2]) assert np.allclose(o2, m.get_value(borrow=True)[2:]) def test_gpujoin_gpualloc(): a =", "= mode_with_gpu self.shared = gpuarray_shared_constructor self.dtypes = [\"float64\", \"float32\"] class", "f.maker.fgraph.toposort()]) == 2 assert sum([node.op == tt.join_ for node in", "# So we must use a real value. M =", "f = theano.function([], o, mode=self.mode) assert any( [ isinstance(node.op, self.split_op_class)", "k)) - np.array(1).astype(dtype) assert np.allclose(result, np.tri(N, M_, k, dtype=dtype)) assert", "(variable, expected) in enumerate(zip(variables, expecteds)): condition = ( variable.dtype !=", "a = tt.fmatrix(\"a\") a_val = np.asarray(np.random.rand(4, 5), dtype=\"float32\") b =", "transfer less data. 
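# ---------------------------------------------------------------------------
# Added illustration (not part of the original file): fake_shared above walks
# the shared constructors in order and falls back on TypeError, so a plain
# numpy scalar still becomes a shared variable via scalar_constructor. The
# helper name below is hypothetical and only demonstrates that fallback.
def _example_fake_shared_fallback():
    s = fake_shared(np.float32(3.0))
    assert s.get_value() == np.float32(3.0)
# ---------------------------------------------------------------------------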
def test_transfer_cpu_gpu():
    a = tt.fmatrix("a")
    g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")

    av = np.asarray(rng.rand(5, 4), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))

    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    fv = f(av)
    assert GpuArrayType.values_eq(fv, gv)

    f = theano.function([g], host_from_gpu(g))
    fv = f(gv)
    assert np.all(fv == av)


def test_transfer_gpu_gpu():
    g = GpuArrayType(
        dtype="float32", broadcastable=(False, False), context_name=test_ctx_name
    )()

    av = np.asarray(rng.rand(5, 4), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))
    mode = mode_with_gpu.excluding(
        "cut_gpua_host_transfers", "local_cut_gpua_host_gpua"
    )
    f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, GpuToGpu)
    fv = f(gv)
    assert GpuArrayType.values_eq(fv, gv)


def test_transfer_strided():
    # This is just to ensure that it works in theano.
    # libgpuarray has a much more comprehensive suite of tests to
    # ensure correctness.
    a = tt.fmatrix("a")
    g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")

    av = np.asarray(rng.rand(5, 8), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))

    av = av[:, ::2]
    gv = gv[:, ::2]

    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    fv = f(av)
    assert GpuArrayType.values_eq(fv, gv)

    f = theano.function([g], host_from_gpu(g))
    fv = f(gv)
    assert np.all(fv == av)
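# ---------------------------------------------------------------------------
# Added illustration (not part of the original file) of the graph-inspection
# pattern the transfer tests rely on: compile a function, then walk
# f.maker.fgraph.toposort() to see which ops ended up in the final graph.
# The helper name is hypothetical.
def _example_toposort_check():
    a = tt.fmatrix("a")
    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    ops = [type(node.op) for node in f.maker.fgraph.toposort()]
    assert GpuFromHost in ops
# ---------------------------------------------------------------------------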
for", "gpu_op, cases, checks=None, mode_gpu=mode_with_gpu, mode_nogpu=mode_without_gpu, skip=False, eps=1e-10, ): if checks", "TestGPUAlloc(TestAlloc): dtype = \"float32\" mode = mode_with_gpu shared = staticmethod(gpuarray_shared_constructor)", "as exc: ref_e = exc try: variables = f_tst() except", "pytest.skip(skip) for testname, inputs in cases.items(): for _ in range(len(inputs)):", "3, 3, 1) check(dtype, 3, 3, -1) # N <", "dtype in [\"float32\", \"int32\", \"float16\"]: check(dtype, 3) # M !=", "def test_gpueye(): def check(dtype, N, M_=None, k=0): # Theano does", "node in f.maker.fgraph.toposort()]) utt.seed_rng() test_rng = np.random.RandomState(seed=utt.fetch_seed()) for dtype in", "f(m, k) assert np.allclose(result, np.triu(m, k)) assert result.dtype == np.dtype(dtype)", "gpuarray.array(av, context=get_context(test_ctx_name)) mode = mode_with_gpu.excluding( \"cut_gpua_host_transfers\", \"local_cut_gpua_host_gpua\" ) f =", "outputs, mode=None, allow_input_downcast=False, on_unused_input=\"raise\", name=None, ): if mode is None:", "f = theano.function([x], x.shape, mode=mode) topo = f.maker.fgraph.toposort() assert np.all(f(v)", "= tt.fmatrix(\"a\") ca = theano.gpuarray.type.GpuArrayType(\"float32\", (False, False))() av = np.asarray(np.random.rand(5,", ") ) for description, check in self.checks.items(): assert check(inputs, variables),", "mode=m) topo = f.maker.fgraph.toposort() assert isinstance(topo[0].op, theano.compile.Shape_i) assert isinstance(topo[1].op, theano.compile.Shape_i)", "= kwargs.pop(\"cls\", None) if len(kwargs) != 0: raise TypeError(\"Unexpected argument", "np.tri(N, M_, k, dtype=dtype)) assert result.dtype == np.dtype(dtype) assert any([isinstance(node.op,", "= mode_with_gpu.excluding( \"cut_gpua_host_transfers\", \"local_cut_gpua_host_gpua\" ) f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)", "import mode_with_gpu, mode_without_gpu, test_ctx_name from tests.tensor.test_basic import ( TestAlloc, TestComparison,", "should be the view of the # non-empty element. 
s", "return c( value, name=name, strict=strict, allow_downcast=allow_downcast, **kwargs ) except TypeError:", "possibly wrong results on the CPU # correct01_bcast=(rand(1), np.int32(7)), correct02=(rand(),", "mode=mode_with_gpu) result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype) assert np.allclose(result,", "( \"Test %s::%s: Failed check: %s \" \"(inputs were %s,", "with limited devices self.floatX = \"float32\" self.hide_error = theano.config.mode not", "kwargs.pop(\"cls\", None) if len(kwargs) != 0: raise TypeError(\"Unexpected argument %s\",", "check(dtype, 3, 3, -1) # N < M, k !=", "sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()]) == 2 ) assert", "ref_e = None try: expecteds = f_ref() except Exception as", "np.triu(m, k)) assert result.dtype == np.dtype(dtype) assert any([isinstance(node.op, GpuTri) for", "fv = f(av) assert GpuArrayType.values_eq(fv, gv) f = theano.function([g], host_from_gpu(g))", "2), ], ) out = f() assert out[0].shape == (3,", "import theano import theano.tensor as tt # Don't import test", "inputs %s\" ) % (self.gpu_op, testname, inputs) exc.args += (err_msg,)", "def test_transfer_cpu_gpu(): a = tt.fmatrix(\"a\") g = GpuArrayType(dtype=\"float32\", broadcastable=(False, False))(\"g\")", "= self.shared(rng.rand(4, 6).astype(\"float16\")) o = tt.Split(2)(m, 0, [2, 2]) assert", "*args: alloc(*args) + 1, gpu_op=GpuAlloc(test_ctx_name), cases=dict( correct01=(rand(), np.int32(7)), # just", "check(dtype, 3, 3, 1) check(dtype, 3, 3, -1) # N", "1 assert isinstance(topo[0].op, tt.Shape) def test_gpu_contiguous(): a = tt.fmatrix(\"a\") i", "setup_method(self): self.shared = gpuarray_shared_constructor self.op = GpuReshape self.mode = mode_with_gpu", "the wrong \" \"value. With inputs %s, expected %s \"", "np.asarray(np.random.rand(3, 5), dtype=\"float32\") f = theano.function( [a, b], tt.join(0, tt.zeros_like(a),", "was not the same as the reference \" \"call (got:", "for inp in inputs] try: node_ref = safe_make_node(self.op, *inputs_ref) node_tst", "from theano.gpuarray.basic_ops import ( GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin,", "any( [ isinstance(node.op, self.split_op_class) for node in f.maker.fgraph.toposort() ] )", ") result = f(m, k) assert np.allclose(result, np.triu(m, k)) assert", "np.asarray(f(N, M, k)) - np.array(1).astype(dtype) assert np.allclose(result, np.eye(N, M_, k,", "f = theano.function( [a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu ) topo", "tt.alloc for node in f.maker.fgraph.toposort()]) == 2 assert sum([node.op ==", "exc.args += (err_msg,) raise else: # if we raised an", "f = theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4,", "(3, 2) assert out[0].dtype == \"uint64\" assert out[1].shape == (3,", "= tt.matrix(dtype=m.dtype) k_symb = tt.iscalar() f = theano.function( [m_symb, k_symb],", "assert not condition, ( \"Test %s::%s: Output %s gave the", "= safe_make_node(self.op, *inputs_ref) node_tst = safe_make_node(self.op, *inputs_tst) except Exception as", "= name if hasattr(Checker, \"__qualname__\"): Checker.__qualname__ = name return Checker", "in enumerate(zip(variables, expecteds)): condition = ( variable.dtype != expected.dtype or", "as part of the file from tests import unittest_tools as", "context=get_context(test_ctx_name)) mode = mode_with_gpu.excluding( \"cut_gpua_host_transfers\", \"local_cut_gpua_host_gpua\" ) f = theano.function([g],", "N N_symb = tt.iscalar() M_symb = tt.iscalar() k_symb = tt.iscalar()", "= 
tt.fmatrix(\"a\") g = GpuArrayType(dtype=\"float32\", broadcastable=(False, False))(\"g\") av = np.asarray(rng.rand(5,", "!= N, k = 0 check(dtype, 3, 5) check(dtype, 5,", "be the view of the # non-empty element. s =", "test_alloc_empty(): for dt in [\"float32\", \"int8\"]: f = theano.function([], GpuAllocEmpty(dt,", "= f() assert out[0].shape == (3, 2) assert out[0].dtype ==", "400) check(dtype, 5) # M != N, k = 0", "= mode_with_gpu self.ignore_topo = ( HostFromGpu, GpuFromHost, theano.compile.DeepCopyOp, GpuDimShuffle, GpuElemwise,", "== (3, 4, 5)) if theano.config.mode != \"FAST_COMPILE\": assert len(topo)", "check_l(m, -1) check_u(m, 0) check_u(m, 1) check_u(m, -1) def test_gputri():", "gv = gv[:, ::2] f = theano.function([a], GpuFromHost(test_ctx_name)(a)) fv =", "np.asarray(np.random.rand(5, 4), dtype=\"float32\") cv = gpuarray.asarray( np.random.rand(5, 4), dtype=\"float32\", context=get_context(test_ctx_name)", "5) check(dtype, 5, 3) # N == M, k !=", "utt.seed_rng() test_rng = np.random.RandomState(seed=utt.fetch_seed()) for dtype in [\"float64\", \"float32\", \"float16\"]:", "- np.array(1).astype(dtype) assert np.allclose(result, np.tri(N, M_, k, dtype=dtype)) assert result.dtype", "GpuArrayType.values_eq(fv, gv) f = theano.function([g], host_from_gpu(g)) fv = f(gv) assert", "same type we're good. if isinstance(exc, type(ref_e)): return else: err_msg", "topo]) assert any([isinstance(node.op, GpuContiguous) for node in topo]) assert f(a_val,", "assert out.dtype == dt f = theano.function( [], [ GpuAllocEmpty(\"uint64\",", "name return Checker def test_transfer_cpu_gpu(): a = tt.fmatrix(\"a\") g =", "tt # Don't import test classes otherwise they get tested", "!= expected.dtype or variable.shape != expected.shape or not TensorType.values_eq_approx(variable, expected)", "theano.compile.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(cv)) == (5, 4) def", "k_symb], tt.tril(m_symb, k_symb), mode=mode_with_gpu ) result = f(m, k) assert", "av = np.asarray(rng.rand(5, 4), dtype=\"float32\") gv = gpuarray.array(av, context=get_context(test_ctx_name)) mode", "try a big one m = np.asarray(test_rng.rand(5000, 5000) * 2", "m = self.shared(rng.rand(4, 6).astype(\"float16\")) o = tt.Split(2)(m, 0, [2, 2])", "check(dtype, 5) # M != N, k = 0 check(dtype,", "condition = ( variable.dtype != expected.dtype or variable.shape != expected.shape", "> M, -k > N, k > M, k >", "[], [ GpuAllocEmpty(\"uint64\", test_ctx_name)(3, 2), GpuAllocEmpty(\"uint64\", test_ctx_name)(3, 2), ], )", "from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name from tests.tensor.test_basic import (", "= gpuarray.array(av, context=get_context(test_ctx_name)) av = av[:, ::2] gv = gv[:,", "f.maker.fgraph.toposort()]) == 1 assert ( sum([isinstance(node.op, GpuAlloc) for node in", "3, 1) check(dtype, 5, 3, -1) # k > M,", "cls = kwargs.pop(\"cls\", None) if len(kwargs) != 0: raise TypeError(\"Unexpected", "if this is # allowed. 
if M is None: M", "broadcastable=(False, False))(\"g\") av = np.asarray(rng.rand(5, 8), dtype=\"float32\") gv = gpuarray.array(av,", "0) check_u(m, 1) check_u(m, -1) m = np.asarray(test_rng.rand(10, 5) *", "av = np.asarray(rng.rand(5, 8), dtype=\"float32\") gv = gpuarray.array(av, context=get_context(test_ctx_name)) av", "== 4 assert isinstance(topo[0].op, tt.opt.Shape_i) assert isinstance(topo[1].op, tt.opt.Shape_i) assert isinstance(topo[2].op,", "= mode_with_gpu shared = staticmethod(gpuarray_shared_constructor) allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()]", "not accept None as a tensor. # So we must", "= np.asarray(test_rng.rand(10, 10) * 2 - 1, dtype=dtype) check_l(m, 0)", "= np.asarray(f(N, M, k)) - np.array(1).astype(dtype) assert np.allclose(result, np.eye(N, M_,", "5, 1) check(dtype, 3, 5, -1) # N > M,", "gpu_join, host_from_gpu, ) from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise from theano.gpuarray.subtensor", "host_from_gpu(ca).shape, mode=m) topo = f.maker.fgraph.toposort() assert isinstance(topo[0].op, theano.compile.Shape_i) assert isinstance(topo[1].op,", "errors with limited devices self.floatX = \"float32\" self.hide_error = theano.config.mode", "3, 1) check(dtype, 3, 3, -1) # N < M,", "\"float32\" mode = mode_with_gpu shared = staticmethod(gpuarray_shared_constructor) allocs = [GpuAlloc(test_ctx_name),", "theano.Out(c, borrow=True)) if not isinstance(mode_with_gpu, theano.compile.DebugMode): assert x.get_value(borrow=True, return_internal_type=True) is", "+ \".\" + self.__class__.__name__) def test_all(self): if skip: pytest.skip(skip) for", "self.shared = gpuarray_shared_constructor self.op = GpuReshape self.mode = mode_with_gpu self.ignore_topo", "in f_gpu2.maker.fgraph.toposort()]) == 1 assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val)) def", "M != N, k = 0 check(dtype, 3, 5) check(dtype,", "value. M = M_ # Currently DebugMode does not support", "mode=m) assert host_from_gpu in [x.op for x in f.maker.fgraph.toposort()] f", "gpuarray = pygpu.gpuarray utt.seed_rng() rng = np.random.RandomState(seed=utt.fetch_seed()) def inplace_func( inputs,", "import GpuArrayType, get_context, gpuarray_shared_constructor from theano.tensor import TensorType from theano.tensor.basic", ")() av = np.asarray(rng.rand(5, 4), dtype=\"float32\") gv = gpuarray.array(av, context=get_context(test_ctx_name))", "np.dtype(dtype) assert any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()]) for dtype", "an exception of the same type we're good. 
if isinstance(exc,", ") assert self.op == GpuReshape class TestGPUComparison(TestComparison): def setup_method(self): utt.seed_rng()", "check_u(m, -1) m = np.asarray(test_rng.rand(10, 5) * 2 - 1,", "out.shape == (2, 3) assert out.dtype == dt f =", "( \"Test %s::%s: Output %s gave the wrong \" \"value.", "the same as the reference \" \"call (got: %s, expected", "check(dtype, 1000, 1000, 0) check(dtype, 1000, 1000, -400) check(dtype, 1000,", "= np.asarray(np.random.rand(4, 5), dtype=\"float32\") b = tt.fmatrix(\"b\") b_val = np.asarray(np.random.rand(3,", "1) check_u(m, -1) m = np.asarray(test_rng.rand(10, 10) * 2 -", "theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3)) assert len(f.maker.fgraph.apply_nodes) == 1 out =", "-1) # k > M, -k > N, k >", "[ GpuAllocEmpty(\"uint64\", test_ctx_name)(3, 2), GpuAllocEmpty(\"uint64\", test_ctx_name)(3, 2), ], ) out", "setup_method(self): utt.seed_rng() self.mode = mode_with_gpu self.shared = gpuarray_shared_constructor self.dtypes =", "theano.compile.DebugMode): assert x.get_value(borrow=True, return_internal_type=True) is f(0) assert np.allclose(f(0), [3, 4,", "several elements are passed to the # Gpujoin function but", "inplace_func([], node_tst.outputs, mode=mode_gpu) except Exception as exc: err_msg = (", "( len( [ node for node in f.maker.fgraph.apply_nodes if isinstance(node.op,", "limited devices self.floatX = \"float32\" self.hide_error = theano.config.mode not in", "# This is just to ensure that it works in", "is lifted over hostfromgpu m = mode_with_gpu.including( \"local_dot_to_dot22\", \"local_dot22_to_dot22scalar\", \"specialize\"", "theano.compile.Shape_i) assert isinstance(topo[1].op, theano.compile.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(cv)) ==", "def rand_gpuarray(*shape, **kwargs): r = rng.rand(*shape) * 2 - 1", "time. rng = np.random.RandomState(seed=utt.fetch_seed()) m = self.shared(rng.rand(4, 6).astype(\"float16\")) o =", "TestAlloc, TestComparison, TestJoinAndSplit, TestReshape, ) from tests.tensor.utils import rand, safe_make_node", "\"float32\"] class TestGPUJoinAndSplit(TestJoinAndSplit): def setup_method(self): self.mode = mode_with_gpu.excluding(\"constant_folding\") self.join_op =", "np.random.RandomState(seed=utt.fetch_seed()) for dtype in [\"float64\", \"float32\", \"float16\"]: # try a", "type we're good. 
def test_gpueye():
    def check(dtype, N, M_=None, k=0):
        # Theano does not accept None as a tensor.
        # So we must use a real value.
        M = M_
        # Currently DebugMode does not support None as inputs even if this is
        # allowed.
        if M is None:
            M = N
        N_symb = tt.iscalar()
        M_symb = tt.iscalar()
        k_symb = tt.iscalar()
        out = tt.eye(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
        f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)

        result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
        assert np.allclose(result, np.eye(N, M_, k, dtype=dtype))
        assert result.dtype == np.dtype(dtype)
        assert any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()])

    for dtype in ["float32", "int32", "float16"]:
        check(dtype, 3)
        # M != N, k = 0
        check(dtype, 3, 5)
        check(dtype, 5, 3)
        # N == M, k != 0
        check(dtype, 3, 3, 1)
        check(dtype, 3, 3, -1)
        # N < M, k != 0
        check(dtype, 3, 5, 1)
        check(dtype, 3, 5, -1)
        # N > M, k != 0
        check(dtype, 5, 3, 1)
        check(dtype, 5, 3, -1)
        # k > M, -k > N, k > M, k > N
        check(dtype, 5, 3, 3)
        check(dtype, 3, 5, 3)
        check(dtype, 5, 3, -3)
        check(dtype, 5, 3, 6)
        check(dtype, 3, 5, -6)


def test_hostfromgpu_shape_i():
    # Test that the shape is lifted over hostfromgpu

    m = mode_with_gpu.including(
        "local_dot_to_dot22", "local_dot22_to_dot22scalar", "specialize"
    )
    a = tt.fmatrix("a")
    ca = theano.gpuarray.type.GpuArrayType("float32", (False, False))()
    av = np.asarray(np.random.rand(5, 4), dtype="float32")
    cv = gpuarray.asarray(
        np.random.rand(5, 4), dtype="float32", context=get_context(test_ctx_name)
    )

    f = theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m)
    assert any(isinstance(x.op, GpuFromHost) for x in f.maker.fgraph.toposort())
    f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m)
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[0].op, tt.opt.Shape_i)
    assert isinstance(topo[1].op, tt.opt.Shape_i)
    assert isinstance(topo[2].op, tt.opt.MakeVector)
    assert tuple(f(av)) == (5, 4)

    f = theano.function([ca], host_from_gpu(ca), mode=m)
    assert host_from_gpu in [x.op for x in f.maker.fgraph.toposort()]
    f = theano.function([ca], host_from_gpu(ca).shape, mode=m)
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[0].op, theano.compile.Shape_i)
    assert isinstance(topo[1].op, theano.compile.Shape_i)
    assert isinstance(topo[2].op, tt.opt.MakeVector)
    assert tuple(f(cv)) == (5, 4)


def test_Gpujoin_inplace():
    # Test Gpujoin to work inplace.
    #
    # This function tests the case when several elements are passed to the
    # Gpujoin function but all except one of them are empty. In this case
    # Gpujoin should work inplace and the output should be the view of the
    # non-empty element.
    s = tt.lscalar()
    data = np.array([3, 4, 5], dtype=theano.config.floatX)
    x = gpuarray_shared_constructor(data, borrow=True)
    z = tt.zeros((s,))

    join = GpuJoin(view=0)
    c = join(0, x, z)

    f = theano.function([s], theano.Out(c, borrow=True))
    if not isinstance(mode_with_gpu, theano.compile.DebugMode):
        assert x.get_value(borrow=True, return_internal_type=True) is f(0)
    assert np.allclose(f(0), [3, 4, 5])


def test_gpu_tril_triu():
    def check_l(m, k=0):
        m_symb = tt.matrix(dtype=m.dtype)
        k_symb = tt.iscalar()

        f = theano.function(
            [m_symb, k_symb], tt.tril(m_symb, k_symb), mode=mode_with_gpu
        )
        result = f(m, k)
        assert np.allclose(result, np.tril(m, k))
        assert result.dtype == np.dtype(dtype)
        assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])

    def check_u(m, k=0):
        m_symb = tt.matrix(dtype=m.dtype)
        k_symb = tt.iscalar()
        f = theano.function(
            [m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu
        )
        result = f(m, k)
        assert np.allclose(result, np.triu(m, k))
        assert result.dtype == np.dtype(dtype)
        assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])

    utt.seed_rng()
    test_rng = np.random.RandomState(seed=utt.fetch_seed())

    for dtype in ["float64", "float32", "float16"]:
        # try a big one
        m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype)
        check_l(m, 0)
        check_l(m, 1)
        check_l(m, -1)
        check_u(m, 0)
        check_u(m, 1)
        check_u(m, -1)

        m = np.asarray(test_rng.rand(10, 10) * 2 - 1, dtype=dtype)
        check_l(m, 0)
        check_l(m, 1)
        check_l(m, -1)
        check_u(m, 0)
        check_u(m, 1)
        check_u(m, -1)

        m = np.asarray(test_rng.rand(10, 5) * 2 - 1, dtype=dtype)
        check_l(m, 0)
        check_l(m, 1)
        check_l(m, -1)
        check_u(m, 0)
        check_u(m, 1)
        check_u(m, -1)


def test_gputri():
    def check(dtype, N, M_=None, k=0):
        # Theano does not accept None as a tensor.
        # So we must use a real value.
        M = M_
        # Currently DebugMode does not support None as inputs even if this is
        # allowed.
        if M is None:
            M = N
        N_symb = tt.iscalar()
        M_symb = tt.iscalar()
        k_symb = tt.iscalar()
        out = tt.tri(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
        f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)

        result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
        assert np.allclose(result, np.tri(N, M_, k, dtype=dtype))
        assert result.dtype == np.dtype(dtype)
        assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])

    for dtype in ["float64", "float32", "int32", "float16"]:
        # try a big one
        check(dtype, 1000, 1000, 0)
        check(dtype, 1000, 1000, -400)
        check(dtype, 1000, 1000, 400)

        check(dtype, 5)
        # M != N, k = 0
        check(dtype, 3, 5)
        check(dtype, 5, 3)
        # N == M, k != 0
        check(dtype, 3, 3, 1)
        check(dtype, 3, 3, -1)
        # N < M, k != 0
        check(dtype, 3, 5, 1)
        check(dtype, 3, 5, -1)
        # N > M, k != 0
        check(dtype, 5, 3, 1)
        check(dtype, 5, 3, -1)
        # k > M, -k > N, k > M, k > N
        check(dtype, 5, 3, 3)
        check(dtype, 3, 5, 3)
        check(dtype, 5, 3, -3)
        check(dtype, 5, 3, 6)
        check(dtype, 3, 5, -6)
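# ---------------------------------------------------------------------------
# Added illustration (not in the original file) of the k-diagonal semantics
# that test_gpueye and test_gputri compare against: k > 0 shifts the diagonal
# right, k < 0 shifts it down, and an out-of-range k degenerates to all zeros
# (eye) or all zeros/ones (tri).
def _example_k_diagonals():
    assert np.eye(3, 5, 1)[0, 1] == 1.0
    assert np.all(np.eye(3, 5, -6) == np.zeros((3, 5)))
    assert np.all(np.tri(3, 3, -1) == np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]]))
# ---------------------------------------------------------------------------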
[ "animation\", server, m: await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\": {},", "json_output={ \"animations\": identities, \"paused\": [identity], \"resuming\": [identity2], }, ) #", "json_output={\"animations\": {}, \"paused\": []}, ) identity = \"first\" got =", "\"animations\": identities, \"paused\": [identity], \"resuming\": [identity2], }, ) # pause", "Events from photons_app import helpers as hp from photons_canvas.points.simple_messages import", "{\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\": []}, ) got = await", "{\"pause\": [identity]}}, ) await m.add(5) store.clear() await m.add(5) assert store.count(Events.INCOMING(tile,", "{ \"ball_colors\": \"<ManyColor:[((0, 360), (1000.0, 1000.0), (1000.0, 1000.0), (3500.0, 3500.0))]>\",", "info = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, ) assert info[\"animations\"]", "store.clone() @pytest.fixture() def final_future(): fut = hp.create_future() try: yield fut", "async def sender(devices, final_future): async with devices.for_test(final_future) as sender: yield", "\"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identity2}}, json_output={ \"animations\": identities, \"paused\":", "\"animation/info\", \"args\": {\"identity\": identity}} ) info[\"animations\"][identity][\"current_animation\"][\"started\"] = mock.ANY assert info[\"animations\"][identity]", "server_wrapper, FakeTime, MockedCallLater, sender, final_future): with FakeTime() as t: async", "= [identity, identity2] assert got[\"animations\"] == identities assert got[\"started\"] ==", "(3500.0, 3500.0))]>\", \"fade_amount\": 0.02, \"num_balls\": 5, \"rate\": \"<Rate 0.9 ->", "\"animation/pause\", \"args\": {\"pause\": identities}}, json_output={\"animations\": identities, \"paused\": identities, \"pausing\": identities},", "@pytest.fixture() def final_future(): fut = hp.create_future() try: yield fut finally:", "{\"identity\": identity2}}, ) assert \"animations\" in got identities = [identity,", "[[\"balls\", {\"every\": 3}]]}}, ) identity = got[\"started\"] await first_set_64 now", "@pytest.fixture(autouse=True) def set_async_timeout(request): request.applymarker(pytest.mark.async_timeout(15)) describe \"Animation Commands\": async it \"can", "\"pausing\": [identity2], }, ) # resume await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "\"animation/start\", \"args\": {\"animations\": [[\"balls\", {\"every\": 0.3}]]}}, ) identity = got[\"started\"]", "{\"command\": \"animation/info\"}) assert info[\"paused\"] == [] assert identity in info[\"animations\"]", "range options\" in got async it \"can control an animation\",", ") identity = got[\"started\"] await first_set_64 now = store.count(Events.INCOMING(tile, io,", "Set64 from unittest import mock import pytest @pytest.fixture() def store_clone():", "[identity] # stop await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/stop\", \"args\": {\"stop\":", "assert b\"dice animation\" in got assert b\"This animation has the", "{identity: mock.ANY, identity2: mock.ANY}, \"paused\": []} # pause await server.assertCommand(", "# start got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\":", "assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0 # info await server.assertCommand(", "m.add(5) store.clear() await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0", "{}, 
\"paused\": []}, ) identity = \"first\" got = await", "m: async with server_wrapper(store_clone, sender, final_future) as server: yield server,", "{identity: mock.ANY, identity2: mock.ANY} assert info[\"paused\"] == [identity] # stop", "first_set_64 = tile.attrs.event_waiter.wait_for_incoming(io, Set64) # start got = await server.assertCommand(", "pause await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": [identity]}}, )", "{\"pause\": identity}}, json_output={\"animations\": identities, \"paused\": [identity], \"pausing\": [identity]}, ) await", "fut = hp.create_future() try: yield fut finally: fut.cancel() @pytest.fixture() async", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, ) assert info[\"animations\"] == {identity2: mock.ANY}", "(1000.0, 1000.0), (1000.0, 1000.0), (3500.0, 3500.0))]>\", \"fade_amount\": 0.02, \"num_balls\": 5,", "info == {\"animations\": {identity: mock.ANY, identity2: mock.ANY}, \"paused\": []} #", "store.clear() await m.add(5) store.clear() await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64))", "info[\"animations\"] == {identity: mock.ANY, identity2: mock.ANY} assert info[\"paused\"] == [identity]", "import store, load_commands from photons_app.mimic.event import Events from photons_app import", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/stop\", \"args\": {\"stop\": identity}}, json_output={ \"animations\": [identity,", "it \"pausing an animation actually pauses the animation\", devices, server,", "[identity]}}, ) await m.add(5) store.clear() await m.add(5) assert store.count(Events.INCOMING(tile, io,", "pkt=Set64)) == 0 # info await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"},", "in got identities = [identity, identity2] assert got[\"animations\"] == identities", "> now identity = got[\"started\"] await m.add(5) assert store.count(Events.INCOMING(tile, io,", "the following options:\" in got assert b\"colour range options\" in", "[identity, identity2], \"paused\": [identity], \"stopping\": [identity], }, ) await m.add(0.5)", "info[\"animations\"][identity][\"options\"][\"noisy_network\"] == 0 specific = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\",", "= await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, ) assert info[\"animations\"] ==", "io, pkt=Set64)) > 0 # stop await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "\"animation/info\"}, json_output={\"animations\": {}, \"paused\": []}, ) identity = \"first\" got", "in info[\"animations\"] assert info[\"animations\"][identity][\"animations_ran\"] == 1 assert info[\"animations\"][identity][\"current_animation\"] == {", "\"pausing an animation actually pauses the animation\", devices, server, m:", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\": identity2}}, json_output={ \"animations\": identities,", "an animation actually pauses the animation\", devices, server, m: tile", "got identities = [identity, identity2] assert got[\"animations\"] == identities assert", "got assert b\"To see options for a particular animation, run", "identities}}, json_output={ \"animations\": identities, \"paused\": [], \"resuming\": identities, }, )", "== 0 # info await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\":", "async it \"can control an animation\", server, m: await 
server.assertCommand(", "assert b\"Available animations include\" in got assert b\"* dice\" in", "json_output={ \"animations\": [identity, identity2], \"paused\": [identity], \"stopping\": [identity], }, )", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"identity\": identity}}, ) assert", "info[\"animations\"][identity][\"options\"][\"pauser\"] assert info[\"animations\"][identity][\"options\"][\"noisy_network\"] == 0 specific = await server.assertCommand( \"/v1/lifx/command\",", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\": []}, ) async", "identities, \"pausing\": [identity2], }, ) # resume await server.assertCommand( \"/v1/lifx/command\",", ") await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > 0 #", "} assert info[\"animations\"][identity][\"options\"][\"combined\"] assert \"unlocked\" in info[\"animations\"][identity][\"options\"][\"pauser\"] assert info[\"animations\"][identity][\"options\"][\"noisy_network\"] ==", "{ \"name\": \"balls\", \"options\": { \"ball_colors\": \"<ManyColor:[((0, 360), (1000.0, 1000.0),", "json_output={\"animations\": {}, \"paused\": []}, ) got = await server.assertCommand( \"/v1/lifx/command\",", "assert \"animations\" in got assert got[\"animations\"] == [identity] assert got[\"started\"]", "{\"animations\": {identity: mock.ANY, identity2: mock.ANY}, \"paused\": []} # pause await", ") # resume await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\":", "\"paused\": [identity], \"resuming\": [identity2], }, ) # pause multiple await", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"animations\": [[\"balls\", {\"every\": 0.3}]]}}, )", "m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0 # info await", "got[\"animations\"] == [identity] assert got[\"started\"] == identity identity2 = \"second\"", "{\"command\": \"animation/pause\", \"args\": {\"pause\": identity2}}, json_output={ \"animations\": identities, \"paused\": identities,", "}, ) # resume await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\":", "\"animations\": identities, \"paused\": identities, \"pausing\": [identity2], }, ) # resume", "the animation\", devices, server, m: tile = devices[\"tile\"] io =", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\": identities}}, json_output={ \"animations\":", "make_server[1] @pytest.fixture(autouse=True) def set_async_timeout(request): request.applymarker(pytest.mark.async_timeout(15)) describe \"Animation Commands\": async it", "def m(make_server): return make_server[1] @pytest.fixture(autouse=True) def set_async_timeout(request): request.applymarker(pytest.mark.async_timeout(15)) describe \"Animation", "resume await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\": identity2}}, json_output={", "\"args\": {\"resume\": identity2}}, json_output={ \"animations\": identities, \"paused\": [identity], \"resuming\": [identity2],", "b\"colour range options\" in got async it \"can control an", "help\", server, m: await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\": {},", "\"args\": {\"pause\": [identity]}}, ) await m.add(5) store.clear() await m.add(5) assert", "info[\"paused\"] == [identity] # 
stop await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/stop\",", "= tile.attrs.event_waiter.wait_for_incoming(io, Set64) # start got = await server.assertCommand( \"/v1/lifx/command\",", "\"animation/info\"}, ) assert info[\"animations\"] == {identity: mock.ANY, identity2: mock.ANY} assert", "await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > 0 # stop", "assert info[\"animations\"][identity][\"current_animation\"] == { \"name\": \"balls\", \"options\": { \"ball_colors\": \"<ManyColor:[((0,", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\": identity2}}, json_output={ \"animations\":", "\"animation/start\", \"args\": {\"animations\": [[\"balls\", {\"every\": 3}]]}}, ) identity = got[\"started\"]", ") assert info[\"animations\"] == {identity: mock.ANY, identity2: mock.ANY} assert info[\"paused\"]", "photons_app.mimic.event import Events from photons_app import helpers as hp from", "\"animations\" in got identities = [identity, identity2] assert got[\"animations\"] ==", "{\"pause\": identities}}, json_output={\"animations\": identities, \"paused\": identities, \"pausing\": identities}, ) #", "\"paused\": identities, \"pausing\": identities}, ) # resume await server.assertCommand( \"/v1/lifx/command\",", "in got assert got[\"animations\"] == [identity] assert got[\"started\"] == identity", "now > 0 await m.add(5) now2 = store.count(Events.INCOMING(tile, io, pkt=Set64))", "await m.add(5) now2 = store.count(Events.INCOMING(tile, io, pkt=Set64)) assert now2 >", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, ) assert info[\"animations\"] == {identity: mock.ANY,", "def sender(devices, final_future): async with devices.for_test(final_future) as sender: yield sender", "assert \"unlocked\" in info[\"animations\"][identity][\"options\"][\"pauser\"] assert info[\"animations\"][identity][\"options\"][\"noisy_network\"] == 0 specific =", "FakeTime() as t: async with MockedCallLater(t) as m: async with", "assert info[\"animations\"][identity][\"options\"][\"noisy_network\"] == 0 specific = await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\": []}, ) identity", "\"args\": {\"animations\": [[\"balls\", {\"every\": 3}]]}}, ) identity = got[\"started\"] await", "server(make_server): return make_server[0] @pytest.fixture() def m(make_server): return make_server[1] @pytest.fixture(autouse=True) def", "await server.assertCommand(\"/v1/lifx/command\", {\"command\": \"animation/info\"}) assert info[\"paused\"] == [] assert identity", "== identity identity2 = \"second\" got = await server.assertCommand( \"/v1/lifx/command\",", "info[\"paused\"] == [] async it \"pausing an animation actually pauses", "animations include\" in got assert b\"* dice\" in got assert", "m.add(0.5) # info info = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"},", "io, pkt=Set64)) == 0 # info await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "identities = [identity, identity2] assert got[\"animations\"] == identities assert got[\"started\"]", "json_output={\"animations\": identities, \"paused\": [identity], \"pausing\": [identity]}, ) await server.assertCommand( \"/v1/lifx/command\",", "return make_server[1] @pytest.fixture(autouse=True) def set_async_timeout(request): 
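    # -----------------------------------------------------------------------
    # Added note (not part of the original tests): "m" is the MockedCallLater
    # controller yielded by the make_server fixture above. Under FakeTime it
    # intercepts scheduled callbacks, so "await m.add(n)" fast-forwards n
    # simulated seconds instantly instead of sleeping. The helper below is a
    # hypothetical sketch of the packet-counting pattern the last test uses:
    # count how many Set64 frames a tile received in a window of fake time.
    async def _count_set64_frames(devices, tile, io, m, window=5):
        store = devices.store(tile)
        store.clear()
        await m.add(window)
        return store.count(Events.INCOMING(tile, io, pkt=Set64))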
    async it "can get information", server, m:
        # start
        got = await server.assertCommand(
            "/v1/lifx/command",
            {"command": "animation/start", "args": {"animations": [["balls", {"every": 0.3}]]}},
        )
        identity = got["started"]

        info = await server.assertCommand("/v1/lifx/command", {"command": "animation/info"})
        assert info["paused"] == []

        assert identity in info["animations"]
        assert info["animations"][identity]["animations_ran"] == 1
        assert info["animations"][identity]["current_animation"] == {
            "name": "balls",
            "options": {
                "ball_colors": "<ManyColor:[((0, 360), (1000.0, 1000.0), (1000.0, 1000.0), (3500.0, 3500.0))]>",
                "fade_amount": 0.02,
                "num_balls": 5,
                "rate": "<Rate 0.9 -> 1>",
            },
            "started": mock.ANY,
        }
        assert info["animations"][identity]["options"]["combined"]
        assert "unlocked" in info["animations"][identity]["options"]["pauser"]
        assert info["animations"][identity]["options"]["noisy_network"] == 0

        specific = await server.assertCommand(
            "/v1/lifx/command", {"command": "animation/info", "args": {"identity": identity}}
        )
        info["animations"][identity]["current_animation"]["started"] = mock.ANY
        assert info["animations"][identity] == specific
        assert info["paused"] == []

    async it "pausing an animation actually pauses the animation", devices, server, m:
        tile = devices["tile"]
        io = tile.io["MEMORY"]
        store = devices.store(tile)
        store.clear()

        first_set_64 = tile.attrs.event_waiter.wait_for_incoming(io, Set64)

        # start
        got = await server.assertCommand(
            "/v1/lifx/command",
            {"command": "animation/start", "args": {"animations": [["balls", {"every": 3}]]}},
        )
        identity = got["started"]

        await first_set_64

        now = store.count(Events.INCOMING(tile, io, pkt=Set64))
        assert now > 0

        await m.add(5)
        now2 = store.count(Events.INCOMING(tile, io, pkt=Set64))
        assert now2 > now

        # pause
        await server.assertCommand(
            "/v1/lifx/command", {"command": "animation/pause", "args": {"pause": [identity]}}
        )
        await m.add(5)
        store.clear()
        await m.add(5)
        assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0

        # resume
        await server.assertCommand(
            "/v1/lifx/command", {"command": "animation/resume", "args": {"resume": [identity]}}
        )
        await m.add(5)
        assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > 0

        # stop
        await server.assertCommand(
            "/v1/lifx/command", {"command": "animation/stop", "args": {"stop": [identity]}}
        )
        store.clear()
        await m.add(5)
        store.clear()
        await m.add(5)
        assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0

        # info
        await server.assertCommand(
            "/v1/lifx/command",
            {"command": "animation/info"},
            json_output={"animations": {}, "paused": []},
        )
as m:", "[], \"resuming\": identities, }, ) # pause await server.assertCommand( \"/v1/lifx/command\",", "info = await server.assertCommand(\"/v1/lifx/command\", {\"command\": \"animation/info\"}) assert info[\"paused\"] == []", "= await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"identity\": identity}}, )", "\"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identities}}, json_output={\"animations\": identities, \"paused\": identities,", "t: async with MockedCallLater(t) as m: async with server_wrapper(store_clone, sender,", "= await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"animations\": [[\"balls\", {\"every\":", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, ) assert info == {\"animations\":", ") assert b\"dice animation\" in got assert b\"This animation has", "\"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"identity\": identity2}}, ) assert \"animations\" in", "got assert got[\"animations\"] == [identity] assert got[\"started\"] == identity identity2", "# pause await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": [identity]}},", "got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"identity\": identity}},", "json_output={ \"animations\": identities, \"paused\": [], \"resuming\": identities, }, ) #", "0 await m.add(5) now2 = store.count(Events.INCOMING(tile, io, pkt=Set64)) assert now2", "# info await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\":", "\"/v1/lifx/command\", {\"command\": \"animation/info\"}, ) assert info[\"animations\"] == {identity2: mock.ANY} assert", "\"paused\": []}, ) identity = \"first\" got = await server.assertCommand(", "mock.ANY, } assert info[\"animations\"][identity][\"options\"][\"combined\"] assert \"unlocked\" in info[\"animations\"][identity][\"options\"][\"pauser\"] assert info[\"animations\"][identity][\"options\"][\"noisy_network\"]", "{\"every\": 3}]]}}, ) identity = got[\"started\"] await first_set_64 now =", "{\"stop\": identity}}, json_output={ \"animations\": [identity, identity2], \"paused\": [identity], \"stopping\": [identity],", "as sender: yield sender @pytest.fixture() async def make_server(store_clone, server_wrapper, FakeTime,", "make_server(store_clone, server_wrapper, FakeTime, MockedCallLater, sender, final_future): with FakeTime() as t:", "assert info[\"paused\"] == [] assert identity in info[\"animations\"] assert info[\"animations\"][identity][\"animations_ran\"]", "b\"dice animation\" in got assert b\"This animation has the following", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identity}}, json_output={\"animations\": identities,", "server_wrapper(store_clone, sender, final_future) as server: yield server, m @pytest.fixture() def", "io = tile.io[\"MEMORY\"] store = devices.store(tile) store.clear() first_set_64 = tile.attrs.event_waiter.wait_for_incoming(io,", "sender: yield sender @pytest.fixture() async def make_server(store_clone, server_wrapper, FakeTime, MockedCallLater,", "assert got[\"started\"] == identity identity2 = \"second\" got = await", "identity identity2 = \"second\" got = await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "server.assertCommand( \"/v1/lifx/command\", 
{\"command\": \"animation/start\", \"args\": {\"identity\": identity2}}, ) assert \"animations\"", "photons_app import helpers as hp from photons_canvas.points.simple_messages import Set64 from", "now # pause await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\":", "tile = devices[\"tile\"] io = tile.io[\"MEMORY\"] store = devices.store(tile) store.clear()", "\"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\": [identity]}}, ) await m.add(5) assert", "async with devices.for_test(final_future) as sender: yield sender @pytest.fixture() async def", "pkt=Set64)) == 0 # resume await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\",", "assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0 # resume await server.assertCommand(", "# coding: spec from interactor.commander.store import store, load_commands from photons_app.mimic.event", "\"animation/start\", \"args\": {\"identity\": identity2}}, ) assert \"animations\" in got identities", "actually pauses the animation\", devices, server, m: tile = devices[\"tile\"]", "\"paused\": []} # pause await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\":", "<gh_stars>10-100 # coding: spec from interactor.commander.store import store, load_commands from", "request.applymarker(pytest.mark.async_timeout(15)) describe \"Animation Commands\": async it \"can get info and", "{\"animations\": [[\"balls\", {\"every\": 0.3}]]}}, ) identity = got[\"started\"] info =", ") await m.add(0.5) # info info = await server.assertCommand( \"/v1/lifx/command\",", "identity in info[\"animations\"] assert info[\"animations\"][identity][\"animations_ran\"] == 1 assert info[\"animations\"][identity][\"current_animation\"] ==", "\"args\": {\"pause\": identity}}, json_output={\"animations\": identities, \"paused\": [identity], \"pausing\": [identity]}, )", "> 0 await m.add(5) now2 = store.count(Events.INCOMING(tile, io, pkt=Set64)) assert", "identity2}}, json_output={ \"animations\": identities, \"paused\": [identity], \"resuming\": [identity2], }, )", "store.count(Events.INCOMING(tile, io, pkt=Set64)) > now # pause await server.assertCommand( \"/v1/lifx/command\",", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\", \"args\": {\"identity\": identity}} ) info[\"animations\"][identity][\"current_animation\"][\"started\"]", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/stop\", \"args\": {\"stop\": [identity]}}, ) store.clear() await", "now2 = store.count(Events.INCOMING(tile, io, pkt=Set64)) assert now2 > now identity", ") got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/help\"}, ) assert", "[identity], \"pausing\": [identity]}, ) # info info = await server.assertCommand(", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\": []}, ) got", "pauses the animation\", devices, server, m: tile = devices[\"tile\"] io", "\"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"animations\": [[\"balls\", {\"every\": 3}]]}}, ) identity", ") # pause multiple await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\":", "start got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"animations\":", "[] async it \"pausing an animation actually pauses the animation\",", 
"json_output={\"animations\": {}, \"paused\": []}, ) async it \"can get information\",", "load_commands() return store.clone() @pytest.fixture() def final_future(): fut = hp.create_future() try:", "identity = \"first\" got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\",", "animation has the following options:\" in got assert b\"colour range", "identity2: mock.ANY}, \"paused\": []} # pause await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "[identity], \"pausing\": [identity]}, ) await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\":", "{identity2: mock.ANY} assert info[\"paused\"] == [] async it \"pausing an", "devices, server, m: tile = devices[\"tile\"] io = tile.io[\"MEMORY\"] store", "\"can control an animation\", server, m: await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "[identity2], }, ) # resume await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\",", "got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"animations\": [[\"balls\",", "\"animation/info\"}, json_output={\"animations\": {}, \"paused\": []}, ) got = await server.assertCommand(", "got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/help\", \"args\": {\"animation_name\": \"dice\"}},", "== { \"name\": \"balls\", \"options\": { \"ball_colors\": \"<ManyColor:[((0, 360), (1000.0,", "{\"command\": \"animation/stop\", \"args\": {\"stop\": identity}}, json_output={ \"animations\": [identity, identity2], \"paused\":", "1000.0), (3500.0, 3500.0))]>\", \"fade_amount\": 0.02, \"num_balls\": 5, \"rate\": \"<Rate 0.9", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": [identity]}}, ) await", "\"animations\" in got assert got[\"animations\"] == [identity] assert got[\"started\"] ==", ") identity = \"first\" got = await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "has the following options:\" in got assert b\"colour range options\"", "identities, }, ) # pause await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\",", "\"animations\": [identity, identity2], \"paused\": [identity], \"stopping\": [identity], }, ) await", "got got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/help\", \"args\": {\"animation_name\":", "b\"This animation has the following options:\" in got assert b\"colour", "identities, \"paused\": identities, \"pausing\": [identity2], }, ) # resume await", "yield fut finally: fut.cancel() @pytest.fixture() async def sender(devices, final_future): async", "fut finally: fut.cancel() @pytest.fixture() async def sender(devices, final_future): async with", "info and help\", server, m: await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"},", "}, ) # pause await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\":", "set_async_timeout(request): request.applymarker(pytest.mark.async_timeout(15)) describe \"Animation Commands\": async it \"can get info", "mock.ANY} assert info[\"paused\"] == [identity] # stop await server.assertCommand( \"/v1/lifx/command\",", "\"can get information\", server, m: # start got = await", "\"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\": []}, ) got =", ") assert info == {\"animations\": {identity: mock.ANY, identity2: mock.ANY}, 
\"paused\":", "with server_wrapper(store_clone, sender, final_future) as server: yield server, m @pytest.fixture()", "async with MockedCallLater(t) as m: async with server_wrapper(store_clone, sender, final_future)", "it \"can control an animation\", server, m: await server.assertCommand( \"/v1/lifx/command\",", "\"dice\"}}, ) assert b\"dice animation\" in got assert b\"This animation", "== [identity] assert got[\"started\"] == identity identity2 = \"second\" got", "got[\"animations\"] == identities assert got[\"started\"] == identity2 info = await", "{\"command\": \"animation/start\", \"args\": {\"animations\": [[\"balls\", {\"every\": 3}]]}}, ) identity =", "[identity]}}, ) await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > 0", "0.9 -> 1>\", }, \"started\": mock.ANY, } assert info[\"animations\"][identity][\"options\"][\"combined\"] assert", "[identity]}, ) # info info = await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "await m.add(0.5) # info info = await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "{\"command\": \"animation/resume\", \"args\": {\"resume\": identity2}}, json_output={ \"animations\": identities, \"paused\": [identity],", "async with server_wrapper(store_clone, sender, final_future) as server: yield server, m", "\"paused\": [identity], \"pausing\": [identity]}, ) # info info = await", "\"args\": {\"pause\": identities}}, json_output={\"animations\": identities, \"paused\": identities, \"pausing\": identities}, )", "identity2 = \"second\" got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\",", "def server(make_server): return make_server[0] @pytest.fixture() def m(make_server): return make_server[1] @pytest.fixture(autouse=True)", "\"animation/stop\", \"args\": {\"stop\": identity}}, json_output={ \"animations\": [identity, identity2], \"paused\": [identity],", "assert info[\"animations\"][identity][\"animations_ran\"] == 1 assert info[\"animations\"][identity][\"current_animation\"] == { \"name\": \"balls\",", "assert now > 0 await m.add(5) now2 = store.count(Events.INCOMING(tile, io,", "assert info[\"paused\"] == [identity] # stop await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "resume await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\": [identity]}}, )", "\"name\": \"balls\", \"options\": { \"ball_colors\": \"<ManyColor:[((0, 360), (1000.0, 1000.0), (1000.0,", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/stop\", \"args\": {\"stop\": [identity]}}, ) store.clear()", "pause multiple await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identities}},", "# pause await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identity}},", "from interactor.commander.store import store, load_commands from photons_app.mimic.event import Events from", "info[\"animations\"][identity][\"current_animation\"] == { \"name\": \"balls\", \"options\": { \"ball_colors\": \"<ManyColor:[((0, 360),", "with MockedCallLater(t) as m: async with server_wrapper(store_clone, sender, final_future) as", "== 1 assert info[\"animations\"][identity][\"current_animation\"] == { \"name\": \"balls\", \"options\": {", "assert b\"To see options for a particular animation, run this", "= await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"identity\": identity2}}, )", 
"{\"command\": \"animation/info\"}, ) assert info == {\"animations\": {identity: mock.ANY, identity2:", ") await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identity2}}, json_output={", "\"resuming\": identities, }, ) # pause await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0 # resume await", "# resume await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\": identities}},", "\"args\": {\"identity\": identity2}}, ) assert \"animations\" in got identities =", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/help\"}, ) assert b\"Available animations include\"", "== 0 specific = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\", \"args\":", "animation, run this again\" in got got = await server.assertCommand(", "io, pkt=Set64)) assert now2 > now identity = got[\"started\"] await", "\"paused\": []}, ) got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/help\"},", "store = devices.store(tile) store.clear() first_set_64 = tile.attrs.event_waiter.wait_for_incoming(io, Set64) # start", "\"animations\": identities, \"paused\": [], \"resuming\": identities, }, ) # pause", "\"balls\", \"options\": { \"ball_colors\": \"<ManyColor:[((0, 360), (1000.0, 1000.0), (1000.0, 1000.0),", "mock.ANY, identity2: mock.ANY} assert info[\"paused\"] == [identity] # stop await", "\"args\": {\"resume\": identities}}, json_output={ \"animations\": identities, \"paused\": [], \"resuming\": identities,", "# stop await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/stop\", \"args\": {\"stop\": [identity]}},", "in got assert b\"To see options for a particular animation,", "}, ) # pause multiple await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\",", "[identity] assert got[\"started\"] == identity identity2 = \"second\" got =", "in got assert b\"colour range options\" in got async it", "{\"resume\": [identity]}}, ) await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) >", "= await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\", \"args\": {\"identity\": identity}} )", "\"animation/info\"}, ) assert info[\"animations\"] == {identity2: mock.ANY} assert info[\"paused\"] ==", "[]} # pause await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\":", "[[\"balls\", {\"every\": 0.3}]]}}, ) identity = got[\"started\"] info = await", "= \"first\" got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\":", "as t: async with MockedCallLater(t) as m: async with server_wrapper(store_clone,", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/help\"}, ) assert b\"Available animations include\" in", "{\"every\": 0.3}]]}}, ) identity = got[\"started\"] info = await server.assertCommand(\"/v1/lifx/command\",", "got[\"started\"] await first_set_64 now = store.count(Events.INCOMING(tile, io, pkt=Set64)) assert now", "json_output={\"animations\": identities, \"paused\": [identity], \"pausing\": [identity]}, ) # info info", "sender @pytest.fixture() async def make_server(store_clone, server_wrapper, FakeTime, MockedCallLater, sender, final_future):", "sender(devices, final_future): async with devices.for_test(final_future) as sender: yield sender 
@pytest.fixture()", "json_output={ \"animations\": identities, \"paused\": identities, \"pausing\": [identity2], }, ) #", "assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > now # pause await server.assertCommand(", "identities assert got[\"started\"] == identity2 info = await server.assertCommand( \"/v1/lifx/command\",", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\": [identity]}}, ) await", "stop await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/stop\", \"args\": {\"stop\": [identity]}}, )", "@pytest.fixture() def m(make_server): return make_server[1] @pytest.fixture(autouse=True) def set_async_timeout(request): request.applymarker(pytest.mark.async_timeout(15)) describe", "(1000.0, 1000.0), (3500.0, 3500.0))]>\", \"fade_amount\": 0.02, \"num_balls\": 5, \"rate\": \"<Rate", "@pytest.fixture() async def sender(devices, final_future): async with devices.for_test(final_future) as sender:", "\"/v1/lifx/command\", {\"command\": \"animation/info\"}, ) assert info[\"animations\"] == {identity: mock.ANY, identity2:", "import helpers as hp from photons_canvas.points.simple_messages import Set64 from unittest", "info info = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, ) assert", "\"first\" got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"identity\":", "def make_server(store_clone, server_wrapper, FakeTime, MockedCallLater, sender, final_future): with FakeTime() as", "got[\"started\"] == identity identity2 = \"second\" got = await server.assertCommand(", "try: yield fut finally: fut.cancel() @pytest.fixture() async def sender(devices, final_future):", "1>\", }, \"started\": mock.ANY, } assert info[\"animations\"][identity][\"options\"][\"combined\"] assert \"unlocked\" in", "identities}}, json_output={\"animations\": identities, \"paused\": identities, \"pausing\": identities}, ) # resume", "{\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\": []}, ) async it \"can", "\"args\": {\"stop\": [identity]}}, ) store.clear() await m.add(5) store.clear() await m.add(5)", "dice\" in got assert b\"To see options for a particular", "options:\" in got assert b\"colour range options\" in got async", "assert \"animations\" in got identities = [identity, identity2] assert got[\"animations\"]", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/stop\", \"args\": {\"stop\": identity}}, json_output={ \"animations\":", "\"paused\": []}, ) async it \"can get information\", server, m:", "{\"command\": \"animation/help\"}, ) assert b\"Available animations include\" in got assert", "\"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": [identity]}}, ) await m.add(5) store.clear()", "hp from photons_canvas.points.simple_messages import Set64 from unittest import mock import", "Commands\": async it \"can get info and help\", server, m:", "multiple await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identities}}, json_output={\"animations\":", "identity}}, ) assert \"animations\" in got assert got[\"animations\"] == [identity]", "\"animation/pause\", \"args\": {\"pause\": identity2}}, json_output={ \"animations\": identities, \"paused\": identities, \"pausing\":", "{\"command\": \"animation/start\", \"args\": {\"identity\": identity}}, ) assert \"animations\" in got", "identities}, ) # resume await 
server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\":", "got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"identity\": identity2}},", "mock.ANY} assert info[\"paused\"] == [] async it \"pausing an animation", "\"pausing\": [identity]}, ) await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\":", "= got[\"started\"] await first_set_64 now = store.count(Events.INCOMING(tile, io, pkt=Set64)) assert", "async def make_server(store_clone, server_wrapper, FakeTime, MockedCallLater, sender, final_future): with FakeTime()", "from photons_app.mimic.event import Events from photons_app import helpers as hp", "got async it \"can control an animation\", server, m: await", "m: tile = devices[\"tile\"] io = tile.io[\"MEMORY\"] store = devices.store(tile)", "{\"command\": \"animation/start\", \"args\": {\"identity\": identity2}}, ) assert \"animations\" in got", "identities, \"pausing\": identities}, ) # resume await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "store.clear() await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0 #", "0.3}]]}}, ) identity = got[\"started\"] info = await server.assertCommand(\"/v1/lifx/command\", {\"command\":", "server, m: await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\":", "= got[\"started\"] info = await server.assertCommand(\"/v1/lifx/command\", {\"command\": \"animation/info\"}) assert info[\"paused\"]", "describe \"Animation Commands\": async it \"can get info and help\",", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\", \"args\": {\"identity\": identity}} ) info[\"animations\"][identity][\"current_animation\"][\"started\"] =", "return make_server[0] @pytest.fixture() def m(make_server): return make_server[1] @pytest.fixture(autouse=True) def set_async_timeout(request):", "{\"command\": \"animation/info\"}, ) assert info[\"animations\"] == {identity: mock.ANY, identity2: mock.ANY}", "{\"command\": \"animation/pause\", \"args\": {\"pause\": [identity]}}, ) await m.add(5) store.clear() await", "= tile.io[\"MEMORY\"] store = devices.store(tile) store.clear() first_set_64 = tile.attrs.event_waiter.wait_for_incoming(io, Set64)", "assert b\"* dice\" in got assert b\"To see options for", ") assert \"animations\" in got assert got[\"animations\"] == [identity] assert", "a particular animation, run this again\" in got got =", "import mock import pytest @pytest.fixture() def store_clone(): load_commands() return store.clone()", "{\"animation_name\": \"dice\"}}, ) assert b\"dice animation\" in got assert b\"This", "\"animation/info\"}, json_output={\"animations\": {}, \"paused\": []}, ) async it \"can get", "== {identity2: mock.ANY} assert info[\"paused\"] == [] async it \"pausing", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identity}}, json_output={\"animations\": identities, \"paused\":", "[identity], }, ) await m.add(0.5) # info info = await", "io, pkt=Set64)) assert now > 0 await m.add(5) now2 =", "\"paused\": identities, \"pausing\": [identity2], }, ) # resume await server.assertCommand(", ") assert \"animations\" in got identities = [identity, identity2] assert", "def final_future(): fut = hp.create_future() try: yield fut finally: fut.cancel()", "identity2: mock.ANY} assert info[\"paused\"] == [identity] # stop await 
server.assertCommand(", "options for a particular animation, run this again\" in got", "fut.cancel() @pytest.fixture() async def sender(devices, final_future): async with devices.for_test(final_future) as", "@pytest.fixture() async def make_server(store_clone, server_wrapper, FakeTime, MockedCallLater, sender, final_future): with", "assert got[\"animations\"] == identities assert got[\"started\"] == identity2 info =", "0 # resume await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\":", "\"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\": []}, ) async it", "\"paused\": [identity], \"pausing\": [identity]}, ) await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\",", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/help\", \"args\": {\"animation_name\": \"dice\"}}, ) assert b\"dice", "in got got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/help\", \"args\":", "{\"command\": \"animation/info\"}, ) assert info[\"animations\"] == {identity2: mock.ANY} assert info[\"paused\"]", ") store.clear() await m.add(5) store.clear() await m.add(5) assert store.count(Events.INCOMING(tile, io,", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"animations\": [[\"balls\", {\"every\": 3}]]}}, )", "information\", server, m: # start got = await server.assertCommand( \"/v1/lifx/command\",", "def store_clone(): load_commands() return store.clone() @pytest.fixture() def final_future(): fut =", "got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/help\"}, ) assert b\"Available", "pkt=Set64)) assert now > 0 await m.add(5) now2 = store.count(Events.INCOMING(tile,", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"animations\": [[\"balls\", {\"every\": 0.3}]]}},", "as m: async with server_wrapper(store_clone, sender, final_future) as server: yield", "this again\" in got got = await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "identity2 info = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, ) assert", "import Events from photons_app import helpers as hp from photons_canvas.points.simple_messages", "following options:\" in got assert b\"colour range options\" in got", "identities, \"paused\": [], \"resuming\": identities, }, ) # pause await", "an animation\", server, m: await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\":", ") # info info = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"},", "and help\", server, m: await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\":", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identities}}, json_output={\"animations\": identities,", "mock import pytest @pytest.fixture() def store_clone(): load_commands() return store.clone() @pytest.fixture()", "mock.ANY}, \"paused\": []} # pause await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\",", "sender, final_future) as server: yield server, m @pytest.fixture() def server(make_server):", "got[\"started\"] info = await server.assertCommand(\"/v1/lifx/command\", {\"command\": \"animation/info\"}) assert info[\"paused\"] ==", "pause await 
server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identity}}, json_output={\"animations\":", "\"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\": identities}}, json_output={ \"animations\": identities, \"paused\":", "\"args\": {\"animation_name\": \"dice\"}}, ) assert b\"dice animation\" in got assert", "final_future) as server: yield server, m @pytest.fixture() def server(make_server): return", "info[\"animations\"][identity][\"animations_ran\"] == 1 assert info[\"animations\"][identity][\"current_animation\"] == { \"name\": \"balls\", \"options\":", "include\" in got assert b\"* dice\" in got assert b\"To", "# stop await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/stop\", \"args\": {\"stop\": identity}},", "\"can get info and help\", server, m: await server.assertCommand( \"/v1/lifx/command\",", ") assert info[\"animations\"] == {identity2: mock.ANY} assert info[\"paused\"] == []", "\"pausing\": identities}, ) # resume await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\",", "{\"command\": \"animation/info\", \"args\": {\"identity\": identity}} ) info[\"animations\"][identity][\"current_animation\"][\"started\"] = mock.ANY assert", "store.clear() first_set_64 = tile.attrs.event_waiter.wait_for_incoming(io, Set64) # start got = await", "mock.ANY, identity2: mock.ANY}, \"paused\": []} # pause await server.assertCommand( \"/v1/lifx/command\",", ") identity = got[\"started\"] info = await server.assertCommand(\"/v1/lifx/command\", {\"command\": \"animation/info\"})", "info await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\": []},", "3500.0))]>\", \"fade_amount\": 0.02, \"num_balls\": 5, \"rate\": \"<Rate 0.9 -> 1>\",", "assert identity in info[\"animations\"] assert info[\"animations\"][identity][\"animations_ran\"] == 1 assert info[\"animations\"][identity][\"current_animation\"]", "1000.0), (1000.0, 1000.0), (3500.0, 3500.0))]>\", \"fade_amount\": 0.02, \"num_balls\": 5, \"rate\":", "assert now2 > now identity = got[\"started\"] await m.add(5) assert", "= store.count(Events.INCOMING(tile, io, pkt=Set64)) assert now > 0 await m.add(5)", "= \"second\" got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\":", "server, m: tile = devices[\"tile\"] io = tile.io[\"MEMORY\"] store =", "MockedCallLater(t) as m: async with server_wrapper(store_clone, sender, final_future) as server:", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identity2}}, json_output={ \"animations\": identities,", "\"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identity}}, json_output={\"animations\": identities, \"paused\": [identity],", "{\"command\": \"animation/stop\", \"args\": {\"stop\": [identity]}}, ) store.clear() await m.add(5) store.clear()", "coding: spec from interactor.commander.store import store, load_commands from photons_app.mimic.event import", "now2 > now identity = got[\"started\"] await m.add(5) assert store.count(Events.INCOMING(tile,", "\"rate\": \"<Rate 0.9 -> 1>\", }, \"started\": mock.ANY, } assert", "\"/v1/lifx/command\", {\"command\": \"animation/info\", \"args\": {\"identity\": identity}} ) info[\"animations\"][identity][\"current_animation\"][\"started\"] = mock.ANY", "m @pytest.fixture() def server(make_server): return make_server[0] @pytest.fixture() def 
m(make_server): return", "final_future): async with devices.for_test(final_future) as sender: yield sender @pytest.fixture() async", "in info[\"animations\"][identity][\"options\"][\"pauser\"] assert info[\"animations\"][identity][\"options\"][\"noisy_network\"] == 0 specific = await server.assertCommand(", "[] assert identity in info[\"animations\"] assert info[\"animations\"][identity][\"animations_ran\"] == 1 assert", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"identity\": identity2}}, ) assert", "identity2}}, ) assert \"animations\" in got identities = [identity, identity2]", "\"animation/resume\", \"args\": {\"resume\": [identity]}}, ) await m.add(5) assert store.count(Events.INCOMING(tile, io,", "server.assertCommand(\"/v1/lifx/command\", {\"command\": \"animation/info\"}) assert info[\"paused\"] == [] assert identity in", "= await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/help\", \"args\": {\"animation_name\": \"dice\"}}, )", "\"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"identity\": identity}}, ) assert \"animations\" in", "\"ball_colors\": \"<ManyColor:[((0, 360), (1000.0, 1000.0), (1000.0, 1000.0), (3500.0, 3500.0))]>\", \"fade_amount\":", "identity2], \"paused\": [identity], \"stopping\": [identity], }, ) await m.add(0.5) #", "tile.io[\"MEMORY\"] store = devices.store(tile) store.clear() first_set_64 = tile.attrs.event_waiter.wait_for_incoming(io, Set64) #", "{\"animations\": [[\"balls\", {\"every\": 3}]]}}, ) identity = got[\"started\"] await first_set_64", "store.count(Events.INCOMING(tile, io, pkt=Set64)) assert now > 0 await m.add(5) now2", "{\"command\": \"animation/resume\", \"args\": {\"resume\": [identity]}}, ) await m.add(5) assert store.count(Events.INCOMING(tile,", "pkt=Set64)) > now # pause await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\",", "from unittest import mock import pytest @pytest.fixture() def store_clone(): load_commands()", "hp.create_future() try: yield fut finally: fut.cancel() @pytest.fixture() async def sender(devices,", "m: # start got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\",", "get info and help\", server, m: await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\": [identity]}}, ) await m.add(5)", "\"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\": identity2}}, json_output={ \"animations\": identities, \"paused\":", "identity2}}, json_output={ \"animations\": identities, \"paused\": identities, \"pausing\": [identity2], }, )", "\"animation/help\", \"args\": {\"animation_name\": \"dice\"}}, ) assert b\"dice animation\" in got", "store_clone(): load_commands() return store.clone() @pytest.fixture() def final_future(): fut = hp.create_future()", "\"args\": {\"pause\": identity2}}, json_output={ \"animations\": identities, \"paused\": identities, \"pausing\": [identity2],", "info[\"animations\"] == {identity2: mock.ANY} assert info[\"paused\"] == [] async it", "\"/v1/lifx/command\", {\"command\": \"animation/stop\", \"args\": {\"stop\": identity}}, json_output={ \"animations\": [identity, identity2],", "in got assert b\"This animation has the following options:\" in", "[identity2], }, ) # pause multiple await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "animation\" in got assert b\"This animation has the 
following options:\"", "\"paused\": [identity], \"stopping\": [identity], }, ) await m.add(0.5) # info", "store, load_commands from photons_app.mimic.event import Events from photons_app import helpers", "io, pkt=Set64)) > now # pause await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "interactor.commander.store import store, load_commands from photons_app.mimic.event import Events from photons_app", "== {identity: mock.ANY, identity2: mock.ANY} assert info[\"paused\"] == [identity] #", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identity2}}, json_output={ \"animations\":", "identity}}, json_output={\"animations\": identities, \"paused\": [identity], \"pausing\": [identity]}, ) # info", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\": []}, )", "{\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\": []}, ) identity = \"first\"", "specific = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\", \"args\": {\"identity\": identity}}", "\"paused\": [], \"resuming\": identities, }, ) # pause await server.assertCommand(", "\"pausing\": [identity]}, ) # info info = await server.assertCommand( \"/v1/lifx/command\",", "{\"pause\": identity}}, json_output={\"animations\": identities, \"paused\": [identity], \"pausing\": [identity]}, ) #", "info = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, ) assert info", ") assert b\"Available animations include\" in got assert b\"* dice\"", "helpers as hp from photons_canvas.points.simple_messages import Set64 from unittest import", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\": identities}}, json_output={ \"animations\": identities,", "\"second\" got = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"identity\":", "-> 1>\", }, \"started\": mock.ANY, } assert info[\"animations\"][identity][\"options\"][\"combined\"] assert \"unlocked\"", "m: await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\": []},", "identity = got[\"started\"] info = await server.assertCommand(\"/v1/lifx/command\", {\"command\": \"animation/info\"}) assert", "}, ) await m.add(0.5) # info info = await server.assertCommand(", "MockedCallLater, sender, final_future): with FakeTime() as t: async with MockedCallLater(t)", "control an animation\", server, m: await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"},", "== [identity] # stop await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/stop\", \"args\":", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, ) assert info[\"animations\"] == {identity:", "info[\"paused\"] == [] assert identity in info[\"animations\"] assert info[\"animations\"][identity][\"animations_ran\"] ==", "got assert b\"* dice\" in got assert b\"To see options", "{\"stop\": [identity]}}, ) store.clear() await m.add(5) store.clear() await m.add(5) assert", "identities, \"paused\": [identity], \"resuming\": [identity2], }, ) # pause multiple", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, ) assert info == {\"animations\": {identity:", "async it \"can get info and help\", server, m: await", "\"animation/help\"}, ) assert b\"Available 
animations include\" in got assert b\"*", "make_server[0] @pytest.fixture() def m(make_server): return make_server[1] @pytest.fixture(autouse=True) def set_async_timeout(request): request.applymarker(pytest.mark.async_timeout(15))", "= await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/help\"}, ) assert b\"Available animations", "info[\"animations\"] assert info[\"animations\"][identity][\"animations_ran\"] == 1 assert info[\"animations\"][identity][\"current_animation\"] == { \"name\":", "yield sender @pytest.fixture() async def make_server(store_clone, server_wrapper, FakeTime, MockedCallLater, sender,", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identities}}, json_output={\"animations\": identities, \"paused\":", "pkt=Set64)) assert now2 > now identity = got[\"started\"] await m.add(5)", "identity2] assert got[\"animations\"] == identities assert got[\"started\"] == identity2 info", "\"/v1/lifx/command\", {\"command\": \"animation/stop\", \"args\": {\"stop\": [identity]}}, ) store.clear() await m.add(5)", "[identity]}, ) await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": identity2}},", "in got assert b\"* dice\" in got assert b\"To see", ") # pause await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\":", "\"args\": {\"resume\": [identity]}}, ) await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64))", "first_set_64 now = store.count(Events.INCOMING(tile, io, pkt=Set64)) assert now > 0", "{\"identity\": identity}}, ) assert \"animations\" in got assert got[\"animations\"] ==", "assert got[\"started\"] == identity2 info = await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "\"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"animations\": [[\"balls\", {\"every\": 0.3}]]}}, ) identity", "stop await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/stop\", \"args\": {\"stop\": identity}}, json_output={", "identity = got[\"started\"] await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) >", "\"animation/pause\", \"args\": {\"pause\": identity}}, json_output={\"animations\": identities, \"paused\": [identity], \"pausing\": [identity]},", "def set_async_timeout(request): request.applymarker(pytest.mark.async_timeout(15)) describe \"Animation Commands\": async it \"can get", "\"/v1/lifx/command\", {\"command\": \"animation/help\", \"args\": {\"animation_name\": \"dice\"}}, ) assert b\"dice animation\"", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"identity\": identity}}, ) assert \"animations\"", "== identity2 info = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\"}, )", "assert info[\"animations\"][identity][\"options\"][\"combined\"] assert \"unlocked\" in info[\"animations\"][identity][\"options\"][\"pauser\"] assert info[\"animations\"][identity][\"options\"][\"noisy_network\"] == 0", "0.02, \"num_balls\": 5, \"rate\": \"<Rate 0.9 -> 1>\", }, \"started\":", "import pytest @pytest.fixture() def store_clone(): load_commands() return store.clone() @pytest.fixture() def", "\"animation/pause\", \"args\": {\"pause\": [identity]}}, ) await m.add(5) store.clear() await m.add(5)", "yield server, m @pytest.fixture() def server(make_server): return make_server[0] @pytest.fixture() def", "info[\"animations\"][identity][\"options\"][\"combined\"] 
assert \"unlocked\" in info[\"animations\"][identity][\"options\"][\"pauser\"] assert info[\"animations\"][identity][\"options\"][\"noisy_network\"] == 0 specific", "import Set64 from unittest import mock import pytest @pytest.fixture() def", "\"/v1/lifx/command\", {\"command\": \"animation/info\"}, json_output={\"animations\": {}, \"paused\": []}, ) identity =", "\"num_balls\": 5, \"rate\": \"<Rate 0.9 -> 1>\", }, \"started\": mock.ANY,", "from photons_app import helpers as hp from photons_canvas.points.simple_messages import Set64", "{\"resume\": identity2}}, json_output={ \"animations\": identities, \"paused\": [identity], \"resuming\": [identity2], },", "# pause multiple await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\":", "[]}, ) async it \"can get information\", server, m: #", "\"animation/resume\", \"args\": {\"resume\": identities}}, json_output={ \"animations\": identities, \"paused\": [], \"resuming\":", "m(make_server): return make_server[1] @pytest.fixture(autouse=True) def set_async_timeout(request): request.applymarker(pytest.mark.async_timeout(15)) describe \"Animation Commands\":", "b\"Available animations include\" in got assert b\"* dice\" in got", "await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > now # pause", "assert info[\"animations\"] == {identity2: mock.ANY} assert info[\"paused\"] == [] async", "server, m @pytest.fixture() def server(make_server): return make_server[0] @pytest.fixture() def m(make_server):", "tile.attrs.event_waiter.wait_for_incoming(io, Set64) # start got = await server.assertCommand( \"/v1/lifx/command\", {\"command\":", "\"args\": {\"identity\": identity}} ) info[\"animations\"][identity][\"current_animation\"][\"started\"] = mock.ANY assert info[\"animations\"][identity] ==", "finally: fut.cancel() @pytest.fixture() async def sender(devices, final_future): async with devices.for_test(final_future)", "resume await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\": {\"resume\": identities}}, json_output={", "> now # pause await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\":", "identity = got[\"started\"] await first_set_64 now = store.count(Events.INCOMING(tile, io, pkt=Set64))", "m.add(5) now2 = store.count(Events.INCOMING(tile, io, pkt=Set64)) assert now2 > now", "b\"* dice\" in got assert b\"To see options for a", "{\"resume\": identities}}, json_output={ \"animations\": identities, \"paused\": [], \"resuming\": identities, },", "== [] async it \"pausing an animation actually pauses the", "store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0 # info await server.assertCommand( \"/v1/lifx/command\",", "await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/start\", \"args\": {\"animations\": [[\"balls\", {\"every\": 3}]]}},", "= got[\"started\"] await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > now", "server: yield server, m @pytest.fixture() def server(make_server): return make_server[0] @pytest.fixture()", "server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/pause\", \"args\": {\"pause\": [identity]}}, ) await m.add(5)", "3}]]}}, ) identity = got[\"started\"] await first_set_64 now = store.count(Events.INCOMING(tile,", "\"args\": {\"stop\": identity}}, json_output={ \"animations\": [identity, identity2], \"paused\": [identity], \"stopping\":", "\"unlocked\" in info[\"animations\"][identity][\"options\"][\"pauser\"] 
assert info[\"animations\"][identity][\"options\"][\"noisy_network\"] == 0 specific = await", "m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > 0 # stop await", "assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > 0 # stop await server.assertCommand(", "got assert b\"colour range options\" in got async it \"can", "assert got[\"animations\"] == [identity] assert got[\"started\"] == identity identity2 =", "\"<Rate 0.9 -> 1>\", }, \"started\": mock.ANY, } assert info[\"animations\"][identity][\"options\"][\"combined\"]", "see options for a particular animation, run this again\" in", "animation\", devices, server, m: tile = devices[\"tile\"] io = tile.io[\"MEMORY\"]", "return store.clone() @pytest.fixture() def final_future(): fut = hp.create_future() try: yield", "now = store.count(Events.INCOMING(tile, io, pkt=Set64)) assert now > 0 await", "for a particular animation, run this again\" in got got", "await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0 # resume", "particular animation, run this again\" in got got = await", "0 specific = await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/info\", \"args\": {\"identity\":", "}, \"started\": mock.ANY, } assert info[\"animations\"][identity][\"options\"][\"combined\"] assert \"unlocked\" in info[\"animations\"][identity][\"options\"][\"pauser\"]", "[identity]}}, ) store.clear() await m.add(5) store.clear() await m.add(5) assert store.count(Events.INCOMING(tile,", "spec from interactor.commander.store import store, load_commands from photons_app.mimic.event import Events", "{\"command\": \"animation/start\", \"args\": {\"animations\": [[\"balls\", {\"every\": 0.3}]]}}, ) identity =", "async it \"pausing an animation actually pauses the animation\", devices,", "== 0 # resume await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/resume\", \"args\":", "> 0 # stop await server.assertCommand( \"/v1/lifx/command\", {\"command\": \"animation/stop\", \"args\":" ]
[ "import YouTube def download_video(watch_url): yt = YouTube(watch_url) (yt.streams .filter(progressive=True, file_extension='mp4')", "from pytube import YouTube def download_video(watch_url): yt = YouTube(watch_url) (yt.streams", "yt = YouTube(watch_url) (yt.streams .filter(progressive=True, file_extension='mp4') .order_by('resolution') .desc() .first() .download())", "def download_video(watch_url): yt = YouTube(watch_url) (yt.streams .filter(progressive=True, file_extension='mp4') .order_by('resolution') .desc()", "YouTube def download_video(watch_url): yt = YouTube(watch_url) (yt.streams .filter(progressive=True, file_extension='mp4') .order_by('resolution')", "pytube import YouTube def download_video(watch_url): yt = YouTube(watch_url) (yt.streams .filter(progressive=True,", "download_video(watch_url): yt = YouTube(watch_url) (yt.streams .filter(progressive=True, file_extension='mp4') .order_by('resolution') .desc() .first()" ]
[ "function to capture its arguments. It can replace either distutils.core.setup", "a dictionary mapping the source file to a destination file", "2.0 (the \"License\"); # you may not use this file", "get_setup_arguments', \"print(repr(get_setup_arguments('%s')))\" % setuppy] # invoke get_setup_arguments() in a separate", "not have any additional dependencies not available in the current", "have an extended PYTHONPATH etc. :param build_type: the build type", "# change to the directory containing the setup.py file old_cwd", "open('setup.py', 'r') as h: exec(h.read()) finally: distutils.core.setup = distutils_setup try:", ":py:class:`ament_tools.context.Context` :returns: a dictionary containing the arguments of the setup()", "try: import setuptools except ImportError: pass import subprocess import sys", "\"sys.path.insert(0, '%s')\" % ament_tools_path, 'from ament_tools.setup_arguments import get_setup_arguments', \"print(repr(get_setup_arguments('%s')))\" %", "replace disutils.core.setup and setuptools.setup \"\"\" def setup(*args, **kwargs): if args:", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "dictionary containing the arguments of the setup() function \"\"\" prefix", "updated with the captured arguments :returns: a function to replace", "is updated with the captured arguments :returns: a function to", "custom environment when introspecting the setup() function a separate Python", "setup() function \"\"\" global setup_lock if not setup_lock: setup_lock =", "provide a custom environment when introspecting the setup() function a", "dictionary mapping the source file to a destination file \"\"\"", "\"setup() function invoked without the keyword argument 'name'\") data.update(kwargs) return", "get_command_prefix( '%s__setup' % build_type, context.build_space, context.build_dependencies) ament_tools_path = os.path.dirname(os.path.dirname(__file__)) setuppy", "Lock() assert os.path.basename(setup_py_path) == 'setup.py' # prevent side effects in", "captured arguments :returns: a function to replace disutils.core.setup and setuptools.setup", "Source Robotics Foundation, Inc. # # Licensed under the Apache", "cmd, stdout=subprocess.PIPE, shell=True, check=True) output = result.stdout.decode() return ast.literal_eval(output) def", "the data_files structure into a dictionary. :param data_files: either a", "the arguments of the setup() function in the setup.py file.", "use this file except in compliance with the License. #", "== 'nt': ament_tools_path = ament_tools_path.replace(os.sep, os.altsep) setuppy = setuppy.replace(os.sep, os.altsep)", "ament_tools_path = ament_tools_path.replace(os.sep, os.altsep) setuppy = setuppy.replace(os.sep, os.altsep) code_lines =", "create_mock_setup_function(data): \"\"\" Create a mock function to capture its arguments.", "quote_shell_command(cmd) result = subprocess.run( cmd, stdout=subprocess.PIPE, shell=True, check=True) output =", "separate interpreter cmd = prefix + [sys.executable, '-c', ';'.join(code_lines)] cmd", "2 dest = data_file[0] assert not os.path.isabs(dest) sources = data_file[1]", "current environment. :param setup_py_path: the path to the setup.py file", "Therefore the processed setup.py file can not have any additional", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "a function to replace disutils.core.setup and setuptools.setup \"\"\" def setup(*args,", "# in order to capture its arguments try: distutils_setup =", "License. 
# You may obtain a copy of the License", "raise RuntimeError( \"setup() function invoked without the keyword argument 'name'\")", "quote_shell_command setup_lock = None def get_setup_arguments_with_context(build_type, context): \"\"\" Capture the", "distutils and setuptools # in order to capture its arguments", "path and the second element is a list of source", "invoked without the keyword argument 'name'\") data.update(kwargs) return setup def", "import get_setup_arguments', \"print(repr(get_setup_arguments('%s')))\" % setuppy] # invoke get_setup_arguments() in a", "= distutils_setup try: setuptools.setup = setuptools_setup except NameError: pass return", "under the License is distributed on an \"AS IS\" BASIS,", "the setup() function \"\"\" global setup_lock if not setup_lock: setup_lock", "License for the specific language governing permissions and # limitations", "cmd = prefix + [sys.executable, '-c', ';'.join(code_lines)] cmd = quote_shell_command(cmd)", "= Lock() assert os.path.basename(setup_py_path) == 'setup.py' # prevent side effects", "files :returns: a dictionary mapping the source file to a", "except ImportError: pass import subprocess import sys from threading import", "the directory containing the setup.py file old_cwd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(setup_py_path)))", "file to a destination file \"\"\" mapping = {} for", "destination file \"\"\" mapping = {} for data_file in data_files:", "**kwargs): if args: raise RuntimeError( 'setup() function invoked with positional", "import quote_shell_command setup_lock = None def get_setup_arguments_with_context(build_type, context): \"\"\" Capture", "in the current environment. :param setup_py_path: the path to the", "try: setuptools.setup = setuptools_setup except NameError: pass return data finally:", "a list of tuples where the first element is the", "effects in other threads with setup_lock: # change to the", "# invoke get_setup_arguments() in a separate interpreter cmd = prefix", "a list of source files :returns: a dictionary mapping the", "setup.py file old_cwd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(setup_py_path))) try: data = {}", "= data_file[0] assert not os.path.isabs(dest) sources = data_file[1] assert isinstance(sources,", "\"\"\" global setup_lock if not setup_lock: setup_lock = Lock() assert", "= create_mock_setup_function(data) # replace setup() function of distutils and setuptools", "subprocess import sys from threading import Lock from ament_tools.build_type import", "os.getcwd() os.chdir(os.path.dirname(os.path.abspath(setup_py_path))) try: data = {} mock_setup = create_mock_setup_function(data) #", "the current environment. :param setup_py_path: the path to the setup.py", "tuple): assert len(data_file) == 2 dest = data_file[0] assert not", "import setuptools except ImportError: pass import subprocess import sys from", "in compliance with the License. 
# You may obtain a", "arguments') if 'name' not in kwargs: raise RuntimeError( \"setup() function", "software # distributed under the License is distributed on an", "a dictionary containing the arguments of the setup() function \"\"\"", "= quote_shell_command(cmd) result = subprocess.run( cmd, stdout=subprocess.PIPE, shell=True, check=True) output", "finally: distutils.core.setup = distutils_setup try: setuptools.setup = setuptools_setup except NameError:", "a dictionary which is updated with the captured arguments :returns:", "side effects in other threads with setup_lock: # change to", "file can not have any additional dependencies not available in", "separate Python interpreter is being used which can have an", "source files or a list of tuples where the first", "os.altsep) setuppy = setuppy.replace(os.sep, os.altsep) code_lines = [ 'import sys',", "mock_setup except NameError: pass # evaluate the setup.py file with", "Capture the arguments of the setup() function in the setup.py", "prefix + [sys.executable, '-c', ';'.join(code_lines)] cmd = quote_shell_command(cmd) result =", "function to replace disutils.core.setup and setuptools.setup \"\"\" def setup(*args, **kwargs):", "os try: import setuptools except ImportError: pass import subprocess import", "'import sys', \"sys.path.insert(0, '%s')\" % ament_tools_path, 'from ament_tools.setup_arguments import get_setup_arguments',", "function in the setup.py file. The function is being run", "build_type, context.build_space, context.build_dependencies) ament_tools_path = os.path.dirname(os.path.dirname(__file__)) setuppy = os.path.join(context.source_space, 'setup.py')", "\"\"\" Transform the data_files structure into a dictionary. :param data_files:", "data_files: either a list of source files or a list", "the destination path and the second element is a list", "with setup_lock: # change to the directory containing the setup.py", "the current Python interpreter. Therefore the processed setup.py file can", "# prevent side effects in other threads with setup_lock: #", "the setup() function in the setup.py file. To provide a", ":param data_files: either a list of source files or a", "distutils.core.setup distutils.core.setup = mock_setup try: setuptools_setup = setuptools.setup setuptools.setup =", "setuptools # in order to capture its arguments try: distutils_setup", "setup.py file. The function is being run within the current", "the License. import ast import distutils.core import os try: import", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "== 'setup.py' # prevent side effects in other threads with", "list) for source in sources: assert not os.path.isabs(source) mapping[source] =", "\"\"\" def setup(*args, **kwargs): if args: raise RuntimeError( 'setup() function", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "not setup_lock: setup_lock = Lock() assert os.path.basename(setup_py_path) == 'setup.py' #", "capture its arguments try: distutils_setup = distutils.core.setup distutils.core.setup = mock_setup", "to in writing, software # distributed under the License is", "# See the License for the specific language governing permissions", "current Python interpreter. 
Therefore the processed setup.py file can not", "interpreter cmd = prefix + [sys.executable, '-c', ';'.join(code_lines)] cmd =", "data_file[0] assert not os.path.isabs(dest) sources = data_file[1] assert isinstance(sources, list)", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "file old_cwd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(setup_py_path))) try: data = {} mock_setup", "the setup.py file with open('setup.py', 'r') as h: exec(h.read()) finally:", "the build type :param context: the context :type context: :py:class:`ament_tools.context.Context`", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "governing permissions and # limitations under the License. import ast", "with the License. # You may obtain a copy of", "containing the setup.py file old_cwd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(setup_py_path))) try: data", "To provide a custom environment when introspecting the setup() function", "list of source files :returns: a dictionary mapping the source", "sys', \"sys.path.insert(0, '%s')\" % ament_tools_path, 'from ament_tools.setup_arguments import get_setup_arguments', \"print(repr(get_setup_arguments('%s')))\"", "Open Source Robotics Foundation, Inc. # # Licensed under the", "ast import distutils.core import os try: import setuptools except ImportError:", "RuntimeError( \"setup() function invoked without the keyword argument 'name'\") data.update(kwargs)", "shell=True, check=True) output = result.stdout.decode() return ast.literal_eval(output) def get_setup_arguments(setup_py_path): \"\"\"", "in sources: assert not os.path.isabs(source) mapping[source] = os.path.join(dest, os.path.basename(source)) else:", "function of distutils and setuptools # in order to capture", "check=True) output = result.stdout.decode() return ast.literal_eval(output) def get_setup_arguments(setup_py_path): \"\"\" Capture", "compliance with the License. # You may obtain a copy", "function in the setup.py file. To provide a custom environment", "the first element is the destination path and the second", "agreed to in writing, software # distributed under the License", "PYTHONPATH etc. :param build_type: the build type :param context: the", "permissions and # limitations under the License. import ast import", "setup() function in the setup.py file. To provide a custom", "setup_lock: setup_lock = Lock() assert os.path.basename(setup_py_path) == 'setup.py' # prevent", "to the setup.py file :returns: a dictionary containing the arguments", "arguments :returns: a function to replace disutils.core.setup and setuptools.setup \"\"\"", "distributed under the License is distributed on an \"AS IS\"", "def create_mock_setup_function(data): \"\"\" Create a mock function to capture its", "can replace either distutils.core.setup or setuptools.setup. :param data: a dictionary", "import get_command_prefix from ament_tools.helper import quote_shell_command setup_lock = None def", "destination path and the second element is a list of", "express or implied. # See the License for the specific", "and setuptools.setup \"\"\" def setup(*args, **kwargs): if args: raise RuntimeError(", "except in compliance with the License. 
# You may obtain", "'-c', ';'.join(code_lines)] cmd = quote_shell_command(cmd) result = subprocess.run( cmd, stdout=subprocess.PIPE,", "data_file in data_files: if isinstance(data_file, tuple): assert len(data_file) == 2", "context :type context: :py:class:`ament_tools.context.Context` :returns: a dictionary containing the arguments", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "the keyword argument 'name'\") data.update(kwargs) return setup def get_data_files_mapping(data_files): \"\"\"", "not use this file except in compliance with the License.", "arguments of the setup() function \"\"\" prefix = get_command_prefix( '%s__setup'", "data_file[1] assert isinstance(sources, list) for source in sources: assert not", "subprocess.run( cmd, stdout=subprocess.PIPE, shell=True, check=True) output = result.stdout.decode() return ast.literal_eval(output)", "limitations under the License. import ast import distutils.core import os", "get_data_files_mapping(data_files): \"\"\" Transform the data_files structure into a dictionary. :param", "setuptools.setup = mock_setup except NameError: pass # evaluate the setup.py", "writing, software # distributed under the License is distributed on", "dest = data_file[0] assert not os.path.isabs(dest) sources = data_file[1] assert", "you may not use this file except in compliance with", "either distutils.core.setup or setuptools.setup. :param data: a dictionary which is", "of distutils and setuptools # in order to capture its", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "the captured arguments :returns: a function to replace disutils.core.setup and", "to replace disutils.core.setup and setuptools.setup \"\"\" def setup(*args, **kwargs): if", "os.path.isabs(dest) sources = data_file[1] assert isinstance(sources, list) for source in", "file. To provide a custom environment when introspecting the setup()", "try: data = {} mock_setup = create_mock_setup_function(data) # replace setup()", "CONDITIONS OF ANY KIND, either express or implied. # See", "= setuptools.setup setuptools.setup = mock_setup except NameError: pass # evaluate", "a destination file \"\"\" mapping = {} for data_file in", "interpreter. Therefore the processed setup.py file can not have any", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "Copyright 2015 Open Source Robotics Foundation, Inc. # # Licensed", ":returns: a dictionary containing the arguments of the setup() function", "invoked with positional arguments') if 'name' not in kwargs: raise", "second element is a list of source files :returns: a", "setup.py file. To provide a custom environment when introspecting the", "available in the current environment. 
:param setup_py_path: the path to", "type :param context: the context :type context: :py:class:`ament_tools.context.Context` :returns: a", "with the captured arguments :returns: a function to replace disutils.core.setup", "in a separate interpreter cmd = prefix + [sys.executable, '-c',", ":param data: a dictionary which is updated with the captured", "NameError: pass return data finally: os.chdir(old_cwd) def create_mock_setup_function(data): \"\"\" Create", "for data_file in data_files: if isinstance(data_file, tuple): assert len(data_file) ==", "\"\"\" prefix = get_command_prefix( '%s__setup' % build_type, context.build_space, context.build_dependencies) ament_tools_path", "os.path.isabs(source) mapping[source] = os.path.join(dest, os.path.basename(source)) else: assert not os.path.isabs(data_file) mapping[data_file]", "replace either distutils.core.setup or setuptools.setup. :param data: a dictionary which", "mapping = {} for data_file in data_files: if isinstance(data_file, tuple):", "os.path.join(context.source_space, 'setup.py') if os.name == 'nt': ament_tools_path = ament_tools_path.replace(os.sep, os.altsep)", "data.update(kwargs) return setup def get_data_files_mapping(data_files): \"\"\" Transform the data_files structure", "is being used which can have an extended PYTHONPATH etc.", "context: :py:class:`ament_tools.context.Context` :returns: a dictionary containing the arguments of the", "ament_tools_path = os.path.dirname(os.path.dirname(__file__)) setuppy = os.path.join(context.source_space, 'setup.py') if os.name ==", "try: distutils_setup = distutils.core.setup distutils.core.setup = mock_setup try: setuptools_setup =", "files or a list of tuples where the first element", "OR CONDITIONS OF ANY KIND, either express or implied. #", "threading import Lock from ament_tools.build_type import get_command_prefix from ament_tools.helper import", "try: setuptools_setup = setuptools.setup setuptools.setup = mock_setup except NameError: pass", "setuptools.setup. :param data: a dictionary which is updated with the", "the License is distributed on an \"AS IS\" BASIS, #", "build_type: the build type :param context: the context :type context:", "of the setup() function \"\"\" prefix = get_command_prefix( '%s__setup' %", "setuppy] # invoke get_setup_arguments() in a separate interpreter cmd =", "def get_data_files_mapping(data_files): \"\"\" Transform the data_files structure into a dictionary.", "setuppy = os.path.join(context.source_space, 'setup.py') if os.name == 'nt': ament_tools_path =", "setup.py file can not have any additional dependencies not available", "= result.stdout.decode() return ast.literal_eval(output) def get_setup_arguments(setup_py_path): \"\"\" Capture the arguments", "function is being run within the current Python interpreter. Therefore", "ament_tools_path.replace(os.sep, os.altsep) setuppy = setuppy.replace(os.sep, os.altsep) code_lines = [ 'import", "setuptools.setup setuptools.setup = mock_setup except NameError: pass # evaluate the", "can have an extended PYTHONPATH etc. :param build_type: the build", "= os.path.dirname(os.path.dirname(__file__)) setuppy = os.path.join(context.source_space, 'setup.py') if os.name == 'nt':", "code_lines = [ 'import sys', \"sys.path.insert(0, '%s')\" % ament_tools_path, 'from", "Foundation, Inc. 
# # Licensed under the Apache License, Version", "from ament_tools.build_type import get_command_prefix from ament_tools.helper import quote_shell_command setup_lock =", ":param context: the context :type context: :py:class:`ament_tools.context.Context` :returns: a dictionary", "processed setup.py file can not have any additional dependencies not", "either a list of source files or a list of", "not os.path.isabs(source) mapping[source] = os.path.join(dest, os.path.basename(source)) else: assert not os.path.isabs(data_file)", "law or agreed to in writing, software # distributed under", "source file to a destination file \"\"\" mapping = {}", "import Lock from ament_tools.build_type import get_command_prefix from ament_tools.helper import quote_shell_command", "arguments. It can replace either distutils.core.setup or setuptools.setup. :param data:", "== 2 dest = data_file[0] assert not os.path.isabs(dest) sources =", "= setuptools_setup except NameError: pass return data finally: os.chdir(old_cwd) def", "when introspecting the setup() function a separate Python interpreter is", "dictionary containing the arguments of the setup() function \"\"\" global", "into a dictionary. :param data_files: either a list of source", "of source files or a list of tuples where the", "get_command_prefix from ament_tools.helper import quote_shell_command setup_lock = None def get_setup_arguments_with_context(build_type,", "in other threads with setup_lock: # change to the directory", "'setup() function invoked with positional arguments') if 'name' not in", "setup_lock if not setup_lock: setup_lock = Lock() assert os.path.basename(setup_py_path) ==", "import distutils.core import os try: import setuptools except ImportError: pass", "\"\"\" Capture the arguments of the setup() function in the", "'setup.py') if os.name == 'nt': ament_tools_path = ament_tools_path.replace(os.sep, os.altsep) setuppy", "setup() function in the setup.py file. The function is being", "assert not os.path.isabs(dest) sources = data_file[1] assert isinstance(sources, list) for", "cmd = quote_shell_command(cmd) result = subprocess.run( cmd, stdout=subprocess.PIPE, shell=True, check=True)", "setup_py_path: the path to the setup.py file :returns: a dictionary", "def setup(*args, **kwargs): if args: raise RuntimeError( 'setup() function invoked", "directory containing the setup.py file old_cwd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(setup_py_path))) try:", "= mock_setup try: setuptools_setup = setuptools.setup setuptools.setup = mock_setup except", "its arguments. It can replace either distutils.core.setup or setuptools.setup. :param", "may obtain a copy of the License at # #", "# Copyright 2015 Open Source Robotics Foundation, Inc. # #", "return setup def get_data_files_mapping(data_files): \"\"\" Transform the data_files structure into", "which is updated with the captured arguments :returns: a function", "import ast import distutils.core import os try: import setuptools except", "an extended PYTHONPATH etc. :param build_type: the build type :param", "in data_files: if isinstance(data_file, tuple): assert len(data_file) == 2 dest", "under the License. import ast import distutils.core import os try:", "ament_tools.setup_arguments import get_setup_arguments', \"print(repr(get_setup_arguments('%s')))\" % setuppy] # invoke get_setup_arguments() in", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "Inc. # # Licensed under the Apache License, Version 2.0", "capture its arguments. 
It can replace either distutils.core.setup or setuptools.setup.", "element is a list of source files :returns: a dictionary", "def get_setup_arguments(setup_py_path): \"\"\" Capture the arguments of the setup() function", "mock_setup try: setuptools_setup = setuptools.setup setuptools.setup = mock_setup except NameError:", "and # limitations under the License. import ast import distutils.core", "may not use this file except in compliance with the", "kwargs: raise RuntimeError( \"setup() function invoked without the keyword argument", "being run within the current Python interpreter. Therefore the processed", "of the setup() function \"\"\" global setup_lock if not setup_lock:", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "order to capture its arguments try: distutils_setup = distutils.core.setup distutils.core.setup", "[sys.executable, '-c', ';'.join(code_lines)] cmd = quote_shell_command(cmd) result = subprocess.run( cmd,", "the setup.py file. The function is being run within the", "this file except in compliance with the License. # You", "dependencies not available in the current environment. :param setup_py_path: the", "invoke get_setup_arguments() in a separate interpreter cmd = prefix +", "<filename>ament_tools/setup_arguments.py # Copyright 2015 Open Source Robotics Foundation, Inc. #", "distutils.core.setup or setuptools.setup. :param data: a dictionary which is updated", "the arguments of the setup() function \"\"\" prefix = get_command_prefix(", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "change to the directory containing the setup.py file old_cwd =", "setuptools_setup = setuptools.setup setuptools.setup = mock_setup except NameError: pass #", "'r') as h: exec(h.read()) finally: distutils.core.setup = distutils_setup try: setuptools.setup", "the setup.py file. To provide a custom environment when introspecting", "# # Licensed under the Apache License, Version 2.0 (the", ":type context: :py:class:`ament_tools.context.Context` :returns: a dictionary containing the arguments of", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "with positional arguments') if 'name' not in kwargs: raise RuntimeError(", "without the keyword argument 'name'\") data.update(kwargs) return setup def get_data_files_mapping(data_files):", "not os.path.isabs(dest) sources = data_file[1] assert isinstance(sources, list) for source", "\"\"\" mapping = {} for data_file in data_files: if isinstance(data_file,", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "of tuples where the first element is the destination path", "get_setup_arguments_with_context(build_type, context): \"\"\" Capture the arguments of the setup() function", "It can replace either distutils.core.setup or setuptools.setup. :param data: a", "function \"\"\" prefix = get_command_prefix( '%s__setup' % build_type, context.build_space, context.build_dependencies)", "distutils.core import os try: import setuptools except ImportError: pass import", "threads with setup_lock: # change to the directory containing the", "being used which can have an extended PYTHONPATH etc. 
:param", "disutils.core.setup and setuptools.setup \"\"\" def setup(*args, **kwargs): if args: raise", "to the directory containing the setup.py file old_cwd = os.getcwd()", "if args: raise RuntimeError( 'setup() function invoked with positional arguments')", "= get_command_prefix( '%s__setup' % build_type, context.build_space, context.build_dependencies) ament_tools_path = os.path.dirname(os.path.dirname(__file__))", "environment. :param setup_py_path: the path to the setup.py file :returns:", "language governing permissions and # limitations under the License. import", "run within the current Python interpreter. Therefore the processed setup.py", "the setup.py file :returns: a dictionary containing the arguments of", "os.path.join(dest, os.path.basename(source)) else: assert not os.path.isabs(data_file) mapping[data_file] = os.path.basename(data_file) return", "% build_type, context.build_space, context.build_dependencies) ament_tools_path = os.path.dirname(os.path.dirname(__file__)) setuppy = os.path.join(context.source_space,", "setup_lock: # change to the directory containing the setup.py file", "{} for data_file in data_files: if isinstance(data_file, tuple): assert len(data_file)", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "have any additional dependencies not available in the current environment.", "evaluate the setup.py file with open('setup.py', 'r') as h: exec(h.read())", "Create a mock function to capture its arguments. It can", "additional dependencies not available in the current environment. :param setup_py_path:", "% ament_tools_path, 'from ament_tools.setup_arguments import get_setup_arguments', \"print(repr(get_setup_arguments('%s')))\" % setuppy] #", ":returns: a dictionary mapping the source file to a destination", "os.path.basename(setup_py_path) == 'setup.py' # prevent side effects in other threads", "args: raise RuntimeError( 'setup() function invoked with positional arguments') if", "Python interpreter. Therefore the processed setup.py file can not have", "or implied. # See the License for the specific language", "License. import ast import distutils.core import os try: import setuptools", "source files :returns: a dictionary mapping the source file to", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "NameError: pass # evaluate the setup.py file with open('setup.py', 'r')", "setup.py file :returns: a dictionary containing the arguments of the", "distutils_setup = distutils.core.setup distutils.core.setup = mock_setup try: setuptools_setup = setuptools.setup", "sources = data_file[1] assert isinstance(sources, list) for source in sources:", "import subprocess import sys from threading import Lock from ament_tools.build_type", "output = result.stdout.decode() return ast.literal_eval(output) def get_setup_arguments(setup_py_path): \"\"\" Capture the", "setup() function a separate Python interpreter is being used which", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "# evaluate the setup.py file with open('setup.py', 'r') as h:", "assert len(data_file) == 2 dest = data_file[0] assert not os.path.isabs(dest)", "'nt': ament_tools_path = ament_tools_path.replace(os.sep, os.altsep) setuppy = setuppy.replace(os.sep, os.altsep) code_lines", "of source files :returns: a dictionary mapping the source file", "setup.py file with open('setup.py', 'r') as h: exec(h.read()) finally: distutils.core.setup", "mapping the source file to a destination file \"\"\" mapping", "distutils_setup try: setuptools.setup = setuptools_setup except NameError: pass return data", "(the \"License\"); # you may not use this file except", "is a list of source files :returns: a dictionary mapping", "# you may not use this file except in compliance", "function invoked with positional arguments') if 'name' not in kwargs:", "list of tuples where the first element is the destination", "prevent side effects in other threads with setup_lock: # change", "is being run within the current Python interpreter. Therefore the", "'name' not in kwargs: raise RuntimeError( \"setup() function invoked without", "list of source files or a list of tuples where", "which can have an extended PYTHONPATH etc. 
:param build_type: the", "Python interpreter is being used which can have an extended", "distutils.core.setup = distutils_setup try: setuptools.setup = setuptools_setup except NameError: pass", "= {} for data_file in data_files: if isinstance(data_file, tuple): assert", "setup_lock = None def get_setup_arguments_with_context(build_type, context): \"\"\" Capture the arguments", "isinstance(sources, list) for source in sources: assert not os.path.isabs(source) mapping[source]", "# # Unless required by applicable law or agreed to", "[ 'import sys', \"sys.path.insert(0, '%s')\" % ament_tools_path, 'from ament_tools.setup_arguments import", "function a separate Python interpreter is being used which can", "the path to the setup.py file :returns: a dictionary containing", "'%s__setup' % build_type, context.build_space, context.build_dependencies) ament_tools_path = os.path.dirname(os.path.dirname(__file__)) setuppy =", "assert isinstance(sources, list) for source in sources: assert not os.path.isabs(source)", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "'setup.py' # prevent side effects in other threads with setup_lock:", "ImportError: pass import subprocess import sys from threading import Lock", "None def get_setup_arguments_with_context(build_type, context): \"\"\" Capture the arguments of the", "Version 2.0 (the \"License\"); # you may not use this", "build type :param context: the context :type context: :py:class:`ament_tools.context.Context` :returns:", "setuptools.setup = setuptools_setup except NameError: pass return data finally: os.chdir(old_cwd)", "of the setup() function in the setup.py file. To provide", "+ [sys.executable, '-c', ';'.join(code_lines)] cmd = quote_shell_command(cmd) result = subprocess.run(", "replace setup() function of distutils and setuptools # in order", "isinstance(data_file, tuple): assert len(data_file) == 2 dest = data_file[0] assert", "ament_tools.build_type import get_command_prefix from ament_tools.helper import quote_shell_command setup_lock = None", "assert os.path.basename(setup_py_path) == 'setup.py' # prevent side effects in other", "pass # evaluate the setup.py file with open('setup.py', 'r') as", "its arguments try: distutils_setup = distutils.core.setup distutils.core.setup = mock_setup try:", "'name'\") data.update(kwargs) return setup def get_data_files_mapping(data_files): \"\"\" Transform the data_files", "implied. 
# See the License for the specific language governing", "under the Apache License, Version 2.0 (the \"License\"); # you", "context: the context :type context: :py:class:`ament_tools.context.Context` :returns: a dictionary containing", "path to the setup.py file :returns: a dictionary containing the", "data finally: os.chdir(old_cwd) def create_mock_setup_function(data): \"\"\" Create a mock function", "# replace setup() function of distutils and setuptools # in", "by applicable law or agreed to in writing, software #", "old_cwd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(setup_py_path))) try: data = {} mock_setup =", "= setuppy.replace(os.sep, os.altsep) code_lines = [ 'import sys', \"sys.path.insert(0, '%s')\"", "ast.literal_eval(output) def get_setup_arguments(setup_py_path): \"\"\" Capture the arguments of the setup()", "os.altsep) code_lines = [ 'import sys', \"sys.path.insert(0, '%s')\" % ament_tools_path,", "The function is being run within the current Python interpreter.", ":param setup_py_path: the path to the setup.py file :returns: a", "stdout=subprocess.PIPE, shell=True, check=True) output = result.stdout.decode() return ast.literal_eval(output) def get_setup_arguments(setup_py_path):", "setuptools_setup except NameError: pass return data finally: os.chdir(old_cwd) def create_mock_setup_function(data):", "create_mock_setup_function(data) # replace setup() function of distutils and setuptools #", "= distutils.core.setup distutils.core.setup = mock_setup try: setuptools_setup = setuptools.setup setuptools.setup", "dictionary which is updated with the captured arguments :returns: a", "sources: assert not os.path.isabs(source) mapping[source] = os.path.join(dest, os.path.basename(source)) else: assert", "interpreter is being used which can have an extended PYTHONPATH", "for source in sources: assert not os.path.isabs(source) mapping[source] = os.path.join(dest,", "other threads with setup_lock: # change to the directory containing", "first element is the destination path and the second element", "etc. :param build_type: the build type :param context: the context", "except NameError: pass # evaluate the setup.py file with open('setup.py',", "sys from threading import Lock from ament_tools.build_type import get_command_prefix from", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "return data finally: os.chdir(old_cwd) def create_mock_setup_function(data): \"\"\" Create a mock", "Unless required by applicable law or agreed to in writing,", "os.chdir(old_cwd) def create_mock_setup_function(data): \"\"\" Create a mock function to capture", "import sys from threading import Lock from ament_tools.build_type import get_command_prefix", "argument 'name'\") data.update(kwargs) return setup def get_data_files_mapping(data_files): \"\"\" Transform the", "result.stdout.decode() return ast.literal_eval(output) def get_setup_arguments(setup_py_path): \"\"\" Capture the arguments of", "= data_file[1] assert isinstance(sources, list) for source in sources: assert", "= os.path.join(context.source_space, 'setup.py') if os.name == 'nt': ament_tools_path = ament_tools_path.replace(os.sep,", "setuptools except ImportError: pass import subprocess import sys from threading", "the specific language governing permissions and # limitations under the", "file. The function is being run within the current Python", "a list of source files or a list of tuples", "not available in the current environment. 
:param setup_py_path: the path", "applicable law or agreed to in writing, software # distributed", "os.chdir(os.path.dirname(os.path.abspath(setup_py_path))) try: data = {} mock_setup = create_mock_setup_function(data) # replace", "the setup() function \"\"\" prefix = get_command_prefix( '%s__setup' % build_type,", "the setup() function a separate Python interpreter is being used", "source in sources: assert not os.path.isabs(source) mapping[source] = os.path.join(dest, os.path.basename(source))", "ament_tools_path, 'from ament_tools.setup_arguments import get_setup_arguments', \"print(repr(get_setup_arguments('%s')))\" % setuppy] # invoke", "= os.getcwd() os.chdir(os.path.dirname(os.path.abspath(setup_py_path))) try: data = {} mock_setup = create_mock_setup_function(data)", "positional arguments') if 'name' not in kwargs: raise RuntimeError( \"setup()", "in writing, software # distributed under the License is distributed", "used which can have an extended PYTHONPATH etc. :param build_type:", "arguments of the setup() function in the setup.py file. The", "'from ament_tools.setup_arguments import get_setup_arguments', \"print(repr(get_setup_arguments('%s')))\" % setuppy] # invoke get_setup_arguments()", "{} mock_setup = create_mock_setup_function(data) # replace setup() function of distutils", "h: exec(h.read()) finally: distutils.core.setup = distutils_setup try: setuptools.setup = setuptools_setup", "context.build_space, context.build_dependencies) ament_tools_path = os.path.dirname(os.path.dirname(__file__)) setuppy = os.path.join(context.source_space, 'setup.py') if", "= mock_setup except NameError: pass # evaluate the setup.py file", "global setup_lock if not setup_lock: setup_lock = Lock() assert os.path.basename(setup_py_path)", "mapping[source] = os.path.join(dest, os.path.basename(source)) else: assert not os.path.isabs(data_file) mapping[data_file] =", "arguments try: distutils_setup = distutils.core.setup distutils.core.setup = mock_setup try: setuptools_setup", "to capture its arguments. It can replace either distutils.core.setup or", "dictionary. :param data_files: either a list of source files or", "len(data_file) == 2 dest = data_file[0] assert not os.path.isabs(dest) sources", "pass return data finally: os.chdir(old_cwd) def create_mock_setup_function(data): \"\"\" Create a", "extended PYTHONPATH etc. :param build_type: the build type :param context:", "import os try: import setuptools except ImportError: pass import subprocess", "function \"\"\" global setup_lock if not setup_lock: setup_lock = Lock()", "setup_lock = Lock() assert os.path.basename(setup_py_path) == 'setup.py' # prevent side", "= subprocess.run( cmd, stdout=subprocess.PIPE, shell=True, check=True) output = result.stdout.decode() return", "\"\"\" Create a mock function to capture its arguments. 
It", "% setuppy] # invoke get_setup_arguments() in a separate interpreter cmd", "not in kwargs: raise RuntimeError( \"setup() function invoked without the", "os.path.basename(source)) else: assert not os.path.isabs(data_file) mapping[data_file] = os.path.basename(data_file) return mapping", ":returns: a function to replace disutils.core.setup and setuptools.setup \"\"\" def", "setup() function \"\"\" prefix = get_command_prefix( '%s__setup' % build_type, context.build_space,", "and the second element is a list of source files", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "get_setup_arguments() in a separate interpreter cmd = prefix + [sys.executable,", "License, Version 2.0 (the \"License\"); # you may not use", "data = {} mock_setup = create_mock_setup_function(data) # replace setup() function", "# You may obtain a copy of the License at", "ament_tools.helper import quote_shell_command setup_lock = None def get_setup_arguments_with_context(build_type, context): \"\"\"", "= None def get_setup_arguments_with_context(build_type, context): \"\"\" Capture the arguments of", "RuntimeError( 'setup() function invoked with positional arguments') if 'name' not", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "or a list of tuples where the first element is", "where the first element is the destination path and the", "distutils.core.setup = mock_setup try: setuptools_setup = setuptools.setup setuptools.setup = mock_setup", "in the setup.py file. The function is being run within", "\"print(repr(get_setup_arguments('%s')))\" % setuppy] # invoke get_setup_arguments() in a separate interpreter", "the source file to a destination file \"\"\" mapping =", "mock function to capture its arguments. It can replace either", "the License for the specific language governing permissions and #", "the setup() function in the setup.py file. The function is", "can not have any additional dependencies not available in the", "setup(*args, **kwargs): if args: raise RuntimeError( 'setup() function invoked with", "Apache License, Version 2.0 (the \"License\"); # you may not", "raise RuntimeError( 'setup() function invoked with positional arguments') if 'name'", "either express or implied. # See the License for the", "pass import subprocess import sys from threading import Lock from", "data: a dictionary which is updated with the captured arguments", "= [ 'import sys', \"sys.path.insert(0, '%s')\" % ament_tools_path, 'from ament_tools.setup_arguments", "get_setup_arguments(setup_py_path): \"\"\" Capture the arguments of the setup() function in", "any additional dependencies not available in the current environment. :param", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "from ament_tools.helper import quote_shell_command setup_lock = None def get_setup_arguments_with_context(build_type, context):", "setuppy.replace(os.sep, os.altsep) code_lines = [ 'import sys', \"sys.path.insert(0, '%s')\" %", "Lock from ament_tools.build_type import get_command_prefix from ament_tools.helper import quote_shell_command setup_lock", "a mock function to capture its arguments. 
It can replace", "is the destination path and the second element is a", "a separate Python interpreter is being used which can have", "file \"\"\" mapping = {} for data_file in data_files: if", "= ament_tools_path.replace(os.sep, os.altsep) setuppy = setuppy.replace(os.sep, os.altsep) code_lines = [", "= prefix + [sys.executable, '-c', ';'.join(code_lines)] cmd = quote_shell_command(cmd) result", "assert not os.path.isabs(source) mapping[source] = os.path.join(dest, os.path.basename(source)) else: assert not", "= {} mock_setup = create_mock_setup_function(data) # replace setup() function of", "as h: exec(h.read()) finally: distutils.core.setup = distutils_setup try: setuptools.setup =", "in kwargs: raise RuntimeError( \"setup() function invoked without the keyword", "2015 Open Source Robotics Foundation, Inc. # # Licensed under", "prefix = get_command_prefix( '%s__setup' % build_type, context.build_space, context.build_dependencies) ament_tools_path =", "data_files structure into a dictionary. :param data_files: either a list", "os.path.dirname(os.path.dirname(__file__)) setuppy = os.path.join(context.source_space, 'setup.py') if os.name == 'nt': ament_tools_path", "Transform the data_files structure into a dictionary. :param data_files: either", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "def get_setup_arguments_with_context(build_type, context): \"\"\" Capture the arguments of the setup()", "arguments of the setup() function \"\"\" global setup_lock if not", "finally: os.chdir(old_cwd) def create_mock_setup_function(data): \"\"\" Create a mock function to", "'%s')\" % ament_tools_path, 'from ament_tools.setup_arguments import get_setup_arguments', \"print(repr(get_setup_arguments('%s')))\" % setuppy]", "keyword argument 'name'\") data.update(kwargs) return setup def get_data_files_mapping(data_files): \"\"\" Transform", "os.name == 'nt': ament_tools_path = ament_tools_path.replace(os.sep, os.altsep) setuppy = setuppy.replace(os.sep,", "context.build_dependencies) ament_tools_path = os.path.dirname(os.path.dirname(__file__)) setuppy = os.path.join(context.source_space, 'setup.py') if os.name", "data_files: if isinstance(data_file, tuple): assert len(data_file) == 2 dest =", "return ast.literal_eval(output) def get_setup_arguments(setup_py_path): \"\"\" Capture the arguments of the", "if not setup_lock: setup_lock = Lock() assert os.path.basename(setup_py_path) == 'setup.py'", "element is the destination path and the second element is", "if isinstance(data_file, tuple): assert len(data_file) == 2 dest = data_file[0]", ":param build_type: the build type :param context: the context :type", "= os.path.join(dest, os.path.basename(source)) else: assert not os.path.isabs(data_file) mapping[data_file] = os.path.basename(data_file)", "a separate interpreter cmd = prefix + [sys.executable, '-c', ';'.join(code_lines)]", "within the current Python interpreter. 
Therefore the processed setup.py file", "the context :type context: :py:class:`ament_tools.context.Context` :returns: a dictionary containing the", "environment when introspecting the setup() function a separate Python interpreter", "a custom environment when introspecting the setup() function a separate", "containing the arguments of the setup() function \"\"\" global setup_lock", "\"License\"); # you may not use this file except in", "context): \"\"\" Capture the arguments of the setup() function in", "';'.join(code_lines)] cmd = quote_shell_command(cmd) result = subprocess.run( cmd, stdout=subprocess.PIPE, shell=True,", "function invoked without the keyword argument 'name'\") data.update(kwargs) return setup", "setup def get_data_files_mapping(data_files): \"\"\" Transform the data_files structure into a", "setup() function of distutils and setuptools # in order to", "setuptools.setup \"\"\" def setup(*args, **kwargs): if args: raise RuntimeError( 'setup()", "a dictionary. :param data_files: either a list of source files", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "in order to capture its arguments try: distutils_setup = distutils.core.setup", "tuples where the first element is the destination path and", "if 'name' not in kwargs: raise RuntimeError( \"setup() function invoked", "except NameError: pass return data finally: os.chdir(old_cwd) def create_mock_setup_function(data): \"\"\"", "# distributed under the License is distributed on an \"AS", "from threading import Lock from ament_tools.build_type import get_command_prefix from ament_tools.helper", "# Unless required by applicable law or agreed to in", "if os.name == 'nt': ament_tools_path = ament_tools_path.replace(os.sep, os.altsep) setuppy =", "the setup.py file old_cwd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(setup_py_path))) try: data =", "setuppy = setuppy.replace(os.sep, os.altsep) code_lines = [ 'import sys', \"sys.path.insert(0,", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "the arguments of the setup() function \"\"\" global setup_lock if", "file with open('setup.py', 'r') as h: exec(h.read()) finally: distutils.core.setup =", "containing the arguments of the setup() function \"\"\" prefix =", "You may obtain a copy of the License at #", "exec(h.read()) finally: distutils.core.setup = distutils_setup try: setuptools.setup = setuptools_setup except", "the processed setup.py file can not have any additional dependencies", "of the setup() function in the setup.py file. The function", "result = subprocess.run( cmd, stdout=subprocess.PIPE, shell=True, check=True) output = result.stdout.decode()", "structure into a dictionary. :param data_files: either a list of", "# limitations under the License. import ast import distutils.core import", "the Apache License, Version 2.0 (the \"License\"); # you may", "with open('setup.py', 'r') as h: exec(h.read()) finally: distutils.core.setup = distutils_setup", "and setuptools # in order to capture its arguments try:", "Robotics Foundation, Inc. # # Licensed under the Apache License,", "mock_setup = create_mock_setup_function(data) # replace setup() function of distutils and", "to a destination file \"\"\" mapping = {} for data_file", "or setuptools.setup. :param data: a dictionary which is updated with", "in the setup.py file. To provide a custom environment when", "to capture its arguments try: distutils_setup = distutils.core.setup distutils.core.setup =", "arguments of the setup() function in the setup.py file. 
To", "introspecting the setup() function a separate Python interpreter is being", "the second element is a list of source files :returns:", "file :returns: a dictionary containing the arguments of the setup()" ]
[ "torch.testing.assert_close( x, soft_round_inverse(soft_round(x, alpha=2.0), alpha=2.0), ) for offset in range(-5,", "import torch from neuralcompression.functional import soft_round, soft_round_inverse def test_soft_round_inverse(): x", "in the # LICENSE file in the root directory of", "under the MIT license found in the # LICENSE file", "root directory of this source tree. import torch from neuralcompression.functional", "alpha=2.0), ) for offset in range(-5, 5): x = torch.linspace(offset", "x, soft_round_inverse(soft_round(x, alpha=2.0), alpha=2.0), ) for offset in range(-5, 5):", "x = torch.tensor([-1.25, -0.75, 0.75, 1.25]) torch.testing.assert_close( x, soft_round_inverse(soft_round(x, alpha=2.0),", "LICENSE file in the root directory of this source tree.", "in the root directory of this source tree. import torch", "alpha=2.0), alpha=2.0), ) for offset in range(-5, 5): x =", "<gh_stars>100-1000 # Copyright (c) Meta Platforms, Inc. and affiliates. #", "5): x = torch.linspace(offset + 0.001, offset + 0.999, 100)", "= torch.tensor([-1.25, -0.75, 0.75, 1.25]) torch.testing.assert_close( x, soft_round_inverse(soft_round(x, alpha=2.0), alpha=2.0),", "# LICENSE file in the root directory of this source", "the MIT license found in the # LICENSE file in", "found in the # LICENSE file in the root directory", "of this source tree. import torch from neuralcompression.functional import soft_round,", "neuralcompression.functional import soft_round, soft_round_inverse def test_soft_round_inverse(): x = torch.linspace(-2.0, 2.0,", "soft_round_inverse(x, alpha=1e-13), ) x = torch.tensor([-1.25, -0.75, 0.75, 1.25]) torch.testing.assert_close(", "(c) Meta Platforms, Inc. and affiliates. # # This source", "tree. import torch from neuralcompression.functional import soft_round, soft_round_inverse def test_soft_round_inverse():", "0.75, 1.25]) torch.testing.assert_close( x, soft_round_inverse(soft_round(x, alpha=2.0), alpha=2.0), ) for offset", "license found in the # LICENSE file in the root", "0.001, offset + 0.999, 100) torch.testing.assert_close( torch.ceil(x) - 0.5, soft_round_inverse(x,", "range(-5, 5): x = torch.linspace(offset + 0.001, offset + 0.999,", "import soft_round, soft_round_inverse def test_soft_round_inverse(): x = torch.linspace(-2.0, 2.0, 50)", "# Copyright (c) Meta Platforms, Inc. and affiliates. # #", "1.25]) torch.testing.assert_close( x, soft_round_inverse(soft_round(x, alpha=2.0), alpha=2.0), ) for offset in", "0.999, 100) torch.testing.assert_close( torch.ceil(x) - 0.5, soft_round_inverse(x, alpha=5000.0), atol=0.001, rtol=0.002,", "+ 0.999, 100) torch.testing.assert_close( torch.ceil(x) - 0.5, soft_round_inverse(x, alpha=5000.0), atol=0.001,", "directory of this source tree. import torch from neuralcompression.functional import", "100) torch.testing.assert_close( torch.ceil(x) - 0.5, soft_round_inverse(x, alpha=5000.0), atol=0.001, rtol=0.002, )", "2.0, 50) torch.testing.assert_close( x, soft_round_inverse(x, alpha=1e-13), ) x = torch.tensor([-1.25,", "x = torch.linspace(offset + 0.001, offset + 0.999, 100) torch.testing.assert_close(", "Meta Platforms, Inc. and affiliates. # # This source code", "the root directory of this source tree. import torch from", "soft_round_inverse(soft_round(x, alpha=2.0), alpha=2.0), ) for offset in range(-5, 5): x", "soft_round_inverse def test_soft_round_inverse(): x = torch.linspace(-2.0, 2.0, 50) torch.testing.assert_close( x,", "this source tree. 
import torch from neuralcompression.functional import soft_round, soft_round_inverse", "is licensed under the MIT license found in the #", "Platforms, Inc. and affiliates. # # This source code is", "the # LICENSE file in the root directory of this", ") x = torch.tensor([-1.25, -0.75, 0.75, 1.25]) torch.testing.assert_close( x, soft_round_inverse(soft_round(x,", "x, soft_round_inverse(x, alpha=1e-13), ) x = torch.tensor([-1.25, -0.75, 0.75, 1.25])", "torch from neuralcompression.functional import soft_round, soft_round_inverse def test_soft_round_inverse(): x =", "torch.linspace(-2.0, 2.0, 50) torch.testing.assert_close( x, soft_round_inverse(x, alpha=1e-13), ) x =", "file in the root directory of this source tree. import", "for offset in range(-5, 5): x = torch.linspace(offset + 0.001,", "50) torch.testing.assert_close( x, soft_round_inverse(x, alpha=1e-13), ) x = torch.tensor([-1.25, -0.75,", "torch.tensor([-1.25, -0.75, 0.75, 1.25]) torch.testing.assert_close( x, soft_round_inverse(soft_round(x, alpha=2.0), alpha=2.0), )", "source tree. import torch from neuralcompression.functional import soft_round, soft_round_inverse def", "This source code is licensed under the MIT license found", "from neuralcompression.functional import soft_round, soft_round_inverse def test_soft_round_inverse(): x = torch.linspace(-2.0,", "= torch.linspace(-2.0, 2.0, 50) torch.testing.assert_close( x, soft_round_inverse(x, alpha=1e-13), ) x", "soft_round, soft_round_inverse def test_soft_round_inverse(): x = torch.linspace(-2.0, 2.0, 50) torch.testing.assert_close(", "code is licensed under the MIT license found in the", "test_soft_round_inverse(): x = torch.linspace(-2.0, 2.0, 50) torch.testing.assert_close( x, soft_round_inverse(x, alpha=1e-13),", "source code is licensed under the MIT license found in", "licensed under the MIT license found in the # LICENSE", "Copyright (c) Meta Platforms, Inc. and affiliates. # # This", "offset in range(-5, 5): x = torch.linspace(offset + 0.001, offset", "+ 0.001, offset + 0.999, 100) torch.testing.assert_close( torch.ceil(x) - 0.5,", "# # This source code is licensed under the MIT", "in range(-5, 5): x = torch.linspace(offset + 0.001, offset +", "torch.testing.assert_close( x, soft_round_inverse(x, alpha=1e-13), ) x = torch.tensor([-1.25, -0.75, 0.75,", "MIT license found in the # LICENSE file in the", "affiliates. # # This source code is licensed under the", "= torch.linspace(offset + 0.001, offset + 0.999, 100) torch.testing.assert_close( torch.ceil(x)", "offset + 0.999, 100) torch.testing.assert_close( torch.ceil(x) - 0.5, soft_round_inverse(x, alpha=5000.0),", "Inc. and affiliates. # # This source code is licensed", "-0.75, 0.75, 1.25]) torch.testing.assert_close( x, soft_round_inverse(soft_round(x, alpha=2.0), alpha=2.0), ) for", ") for offset in range(-5, 5): x = torch.linspace(offset +", "def test_soft_round_inverse(): x = torch.linspace(-2.0, 2.0, 50) torch.testing.assert_close( x, soft_round_inverse(x,", "# This source code is licensed under the MIT license", "alpha=1e-13), ) x = torch.tensor([-1.25, -0.75, 0.75, 1.25]) torch.testing.assert_close( x,", "x = torch.linspace(-2.0, 2.0, 50) torch.testing.assert_close( x, soft_round_inverse(x, alpha=1e-13), )", "torch.linspace(offset + 0.001, offset + 0.999, 100) torch.testing.assert_close( torch.ceil(x) -", "and affiliates. # # This source code is licensed under" ]
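# --- illustrative sketch (not part of the original test above) ---
# For intuition only: a reference form of soft rounding as commonly defined in
# the neural-compression literature (Agustsson & Theis, 2020). The actual
# neuralcompression implementation may differ in detail, but the test above
# relies on exactly these limiting behaviours:
#   alpha -> 0   : soft_round(x) ~ x, so its inverse is also ~identity
#                  (first assertion, alpha=1e-13)
#   moderate alpha: soft_round_inverse undoes soft_round (second assertion)
#   alpha -> inf : soft_round(x) ~ round(x), so the inverse maps the whole
#                  interval (n, n+1) back to ~n + 0.5 = ceil(x) - 0.5
#                  (third assertion, alpha=5000)
import math


def soft_round_reference(x, alpha):
    m = torch.floor(x) + 0.5  # midpoint of the unit interval containing x
    r = x - m                 # offset within the interval, in (-0.5, 0.5)
    return m + 0.5 * torch.tanh(alpha * r) / math.tanh(alpha / 2)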
[ "easydict import EasyDict hopper_ppo_default_config = dict( env=dict( env_id='HopperMuJoCoEnv-v0', norm_obs=dict(use_norm=False, ),", "hopper_ppo_default_config = dict( env=dict( env_id='HopperMuJoCoEnv-v0', norm_obs=dict(use_norm=False, ), norm_reward=dict(use_norm=False, ), collector_env_num=8,", "policy=dict( type='ppo', import_names=['ding.policy.ppo'], ), replay_buffer=dict(type='naive', ), ) hopper_ppo_create_default_config = EasyDict(hopper_ppo_create_default_config)", "env=dict( env_id='HopperMuJoCoEnv-v0', norm_obs=dict(use_norm=False, ), norm_reward=dict(use_norm=False, ), collector_env_num=8, evaluator_env_num=10, use_act_scale=True, n_evaluator_episode=10,", "= EasyDict(hopper_ppo_default_config) main_config = hopper_ppo_default_config hopper_ppo_create_default_config = dict( env=dict( type='pybullet',", "= dict( env=dict( env_id='HopperMuJoCoEnv-v0', norm_obs=dict(use_norm=False, ), norm_reward=dict(use_norm=False, ), collector_env_num=8, evaluator_env_num=10,", "continuous=True, ), continuous=True, learn=dict( epoch_per_collect=10, batch_size=64, learning_rate=3e-4, value_weight=0.5, entropy_weight=0.0, clip_ratio=0.2,", "on_policy=True, recompute_adv=True, model=dict( obs_shape=11, action_shape=3, continuous=True, ), continuous=True, learn=dict( epoch_per_collect=10,", "stop_value=3000, ), policy=dict( cuda=True, on_policy=True, recompute_adv=True, model=dict( obs_shape=11, action_shape=3, continuous=True,", "), eval=dict(evaluator=dict(eval_freq=5000, )), other=dict(replay_buffer=dict( replay_buffer_size=10000, replay_buffer_start_size=0, ), ), ), )", "hopper_ppo_default_config = EasyDict(hopper_ppo_default_config) main_config = hopper_ppo_default_config hopper_ppo_create_default_config = dict( env=dict(", "type='ppo', import_names=['ding.policy.ppo'], ), replay_buffer=dict(type='naive', ), ) hopper_ppo_create_default_config = EasyDict(hopper_ppo_create_default_config) create_config", "discount_factor=0.99, gae_lambda=0.97, ), eval=dict(evaluator=dict(eval_freq=5000, )), other=dict(replay_buffer=dict( replay_buffer_size=10000, replay_buffer_start_size=0, ), ),", "learning_rate=3e-4, value_weight=0.5, entropy_weight=0.0, clip_ratio=0.2, adv_norm=True, value_norm=True, ), collect=dict( n_sample=2048, unroll_len=1,", "env=dict( type='pybullet', import_names=['dizoo.pybullet.envs.pybullet_env'], ), env_manager=dict(type='subprocess'), policy=dict( type='ppo', import_names=['ding.policy.ppo'], ), replay_buffer=dict(type='naive',", "value_weight=0.5, entropy_weight=0.0, clip_ratio=0.2, adv_norm=True, value_norm=True, ), collect=dict( n_sample=2048, unroll_len=1, discount_factor=0.99,", "entropy_weight=0.0, clip_ratio=0.2, adv_norm=True, value_norm=True, ), collect=dict( n_sample=2048, unroll_len=1, discount_factor=0.99, gae_lambda=0.97,", "policy=dict( cuda=True, on_policy=True, recompute_adv=True, model=dict( obs_shape=11, action_shape=3, continuous=True, ), continuous=True,", "evaluator_env_num=10, use_act_scale=True, n_evaluator_episode=10, stop_value=3000, ), policy=dict( cuda=True, on_policy=True, recompute_adv=True, model=dict(", "cuda=True, on_policy=True, recompute_adv=True, model=dict( obs_shape=11, action_shape=3, continuous=True, ), continuous=True, learn=dict(", "model=dict( obs_shape=11, action_shape=3, continuous=True, ), continuous=True, learn=dict( epoch_per_collect=10, batch_size=64, learning_rate=3e-4,", "), continuous=True, learn=dict( epoch_per_collect=10, batch_size=64, learning_rate=3e-4, value_weight=0.5, entropy_weight=0.0, clip_ratio=0.2, 
from easydict import EasyDict

hopper_ppo_default_config = dict(
    env=dict(
        env_id='HopperMuJoCoEnv-v0',
        norm_obs=dict(use_norm=False, ),
        norm_reward=dict(use_norm=False, ),
        collector_env_num=8,
        evaluator_env_num=10,
        use_act_scale=True,
        n_evaluator_episode=10,
        stop_value=3000,
    ),
    policy=dict(
        cuda=True,
        on_policy=True,
        recompute_adv=True,
        model=dict(
            obs_shape=11,
            action_shape=3,
            continuous=True,
        ),
        continuous=True,
        learn=dict(
            epoch_per_collect=10,
            batch_size=64,
            learning_rate=3e-4,
            value_weight=0.5,
            entropy_weight=0.0,
            clip_ratio=0.2,
            adv_norm=True,
            value_norm=True,
        ),
        collect=dict(
            n_sample=2048,
            unroll_len=1,
            discount_factor=0.99,
            gae_lambda=0.97,
        ),
        eval=dict(evaluator=dict(eval_freq=5000, )),
        other=dict(replay_buffer=dict(
            replay_buffer_size=10000,
            replay_buffer_start_size=0,
        ), ),
    ),
)
hopper_ppo_default_config = EasyDict(hopper_ppo_default_config)
main_config = hopper_ppo_default_config

hopper_ppo_create_default_config = dict(
    env=dict(
        type='pybullet',
        import_names=['dizoo.pybullet.envs.pybullet_env'],
    ),
    env_manager=dict(type='subprocess'),
    policy=dict(
        type='ppo',
        import_names=['ding.policy.ppo'],
    ),
    replay_buffer=dict(type='naive', ),
)
hopper_ppo_create_default_config = EasyDict(hopper_ppo_create_default_config)
create_config = hopper_ppo_create_default_config
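A minimal launcher sketch for the config pair above, assuming DI-engine's standard serial entry point; the ding.entry.serial_pipeline call and its (main_config, create_config) argument convention come from typical DI-engine usage, not from the recovered config file itself.

# Assumed entry point: trains PPO on the Hopper env using the configs defined above.
from ding.entry import serial_pipeline

if __name__ == '__main__':
    serial_pipeline((main_config, create_config), seed=0)
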
[ "= jsonfile[\"name\"] references = jsonfile.get(\"references\") if len(jsonfile[\"entries\"])>0 and jsonfile[\"entries\"][0].get(\"app\"): appFamily=False", "not jsonfile[\"entries\"][0].get(\"appFamily\"): return None else: appFamily=True entries = [i[\"appFamily\"] for", "from_json(cls,jsonfile,**kwargs): id = jsonfile[\"listId\"] name = jsonfile[\"name\"] references = jsonfile.get(\"references\")", "id self.name = name self.references = reference self.app_family=is_app_family self._entries =", "jsonfile[\"name\"] references = jsonfile.get(\"references\") if len(jsonfile[\"entries\"])>0 and jsonfile[\"entries\"][0].get(\"app\"): appFamily=False entries", "class Application(BaseObject): def __init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs): self.type = \"appList\" self.id = id", "= [i[\"appFamily\"] for i in jsonfile[\"entries\"]] return cls(name,entries,appFamily,id,references,**kwargs) def to_json(self):", "name self.references = reference self.app_family=is_app_family self._entries = app_list self.url =", "= \"template/policy/list/app\" super().__init__(**kwargs) self.modified=False def get_entries(self): return self._entries def set_entries(self,entries):", "\"type\":\"app\", \"entries\":[ {\"appFamily\" if self.app_family else \"app\":i} for i in", "= jsonfile[\"listId\"] name = jsonfile[\"name\"] references = jsonfile.get(\"references\") if len(jsonfile[\"entries\"])>0", "references = jsonfile.get(\"references\") if len(jsonfile[\"entries\"])>0 and jsonfile[\"entries\"][0].get(\"app\"): appFamily=False entries =", "\"entries\":[ {\"appFamily\" if self.app_family else \"app\":i} for i in self._entries]", "= reference self.app_family=is_app_family self._entries = app_list self.url = \"template/policy/list/app\" super().__init__(**kwargs)", "self.id = id self.name = name self.references = reference self.app_family=is_app_family", "i in jsonfile[\"entries\"]] else: if not jsonfile[\"entries\"][0].get(\"appFamily\"): return None else:", "appFamily=True entries = [i[\"appFamily\"] for i in jsonfile[\"entries\"]] return cls(name,entries,appFamily,id,references,**kwargs)", "Application(BaseObject): def __init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs): self.type = \"appList\" self.id = id self.name", "= name self.references = reference self.app_family=is_app_family self._entries = app_list self.url", "if not jsonfile[\"entries\"][0].get(\"appFamily\"): return None else: appFamily=True entries = [i[\"appFamily\"]", "jsonfile[\"entries\"]] return cls(name,entries,appFamily,id,references,**kwargs) def to_json(self): return { \"name\":self.name, \"description\":\"Desc Not", "self.type = \"appList\" self.id = id self.name = name self.references", "@classmethod def from_json(cls,jsonfile,**kwargs): id = jsonfile[\"listId\"] name = jsonfile[\"name\"] references", "set_entries(self,entries): self.modified=True self._entries=entries @classmethod def from_json(cls,jsonfile,**kwargs): id = jsonfile[\"listId\"] name", "Not Required\", \"type\":\"app\", \"entries\":[ {\"appFamily\" if self.app_family else \"app\":i} for", "\"appList\" self.id = id self.name = name self.references = reference", "self._entries = app_list self.url = \"template/policy/list/app\" super().__init__(**kwargs) self.modified=False def get_entries(self):", "return cls(name,entries,appFamily,id,references,**kwargs) def to_json(self): return { \"name\":self.name, \"description\":\"Desc Not Required\",", "= id self.name = name self.references = reference 
self.app_family=is_app_family self._entries", "import json from cisco_sdwan_policy.BaseObject import BaseObject class Application(BaseObject): def __init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs):", "= app_list self.url = \"template/policy/list/app\" super().__init__(**kwargs) self.modified=False def get_entries(self): return", "self._entries=entries @classmethod def from_json(cls,jsonfile,**kwargs): id = jsonfile[\"listId\"] name = jsonfile[\"name\"]", "jsonfile[\"entries\"][0].get(\"appFamily\"): return None else: appFamily=True entries = [i[\"appFamily\"] for i", "None else: appFamily=True entries = [i[\"appFamily\"] for i in jsonfile[\"entries\"]]", "self.name = name self.references = reference self.app_family=is_app_family self._entries = app_list", "jsonfile[\"entries\"]] else: if not jsonfile[\"entries\"][0].get(\"appFamily\"): return None else: appFamily=True entries", "jsonfile[\"listId\"] name = jsonfile[\"name\"] references = jsonfile.get(\"references\") if len(jsonfile[\"entries\"])>0 and", "__init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs): self.type = \"appList\" self.id = id self.name = name", "json from cisco_sdwan_policy.BaseObject import BaseObject class Application(BaseObject): def __init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs): self.type", "{\"appFamily\" if self.app_family else \"app\":i} for i in self._entries] }", "\"description\":\"Desc Not Required\", \"type\":\"app\", \"entries\":[ {\"appFamily\" if self.app_family else \"app\":i}", "return None else: appFamily=True entries = [i[\"appFamily\"] for i in", "BaseObject class Application(BaseObject): def __init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs): self.type = \"appList\" self.id =", "\"name\":self.name, \"description\":\"Desc Not Required\", \"type\":\"app\", \"entries\":[ {\"appFamily\" if self.app_family else", "jsonfile[\"entries\"][0].get(\"app\"): appFamily=False entries = [i[\"app\"] for i in jsonfile[\"entries\"]] else:", "len(jsonfile[\"entries\"])>0 and jsonfile[\"entries\"][0].get(\"app\"): appFamily=False entries = [i[\"app\"] for i in", "reference self.app_family=is_app_family self._entries = app_list self.url = \"template/policy/list/app\" super().__init__(**kwargs) self.modified=False", "return { \"name\":self.name, \"description\":\"Desc Not Required\", \"type\":\"app\", \"entries\":[ {\"appFamily\" if", "def set_entries(self,entries): self.modified=True self._entries=entries @classmethod def from_json(cls,jsonfile,**kwargs): id = jsonfile[\"listId\"]", "to_json(self): return { \"name\":self.name, \"description\":\"Desc Not Required\", \"type\":\"app\", \"entries\":[ {\"appFamily\"", "id = jsonfile[\"listId\"] name = jsonfile[\"name\"] references = jsonfile.get(\"references\") if", "= jsonfile.get(\"references\") if len(jsonfile[\"entries\"])>0 and jsonfile[\"entries\"][0].get(\"app\"): appFamily=False entries = [i[\"app\"]", "cls(name,entries,appFamily,id,references,**kwargs) def to_json(self): return { \"name\":self.name, \"description\":\"Desc Not Required\", \"type\":\"app\",", "else: appFamily=True entries = [i[\"appFamily\"] for i in jsonfile[\"entries\"]] return", "\"template/policy/list/app\" super().__init__(**kwargs) self.modified=False def get_entries(self): return self._entries def set_entries(self,entries): self.modified=True", "self.url = \"template/policy/list/app\" super().__init__(**kwargs) self.modified=False def get_entries(self): return self._entries def", "return 
self._entries def set_entries(self,entries): self.modified=True self._entries=entries @classmethod def from_json(cls,jsonfile,**kwargs): id", "and jsonfile[\"entries\"][0].get(\"app\"): appFamily=False entries = [i[\"app\"] for i in jsonfile[\"entries\"]]", "entries = [i[\"appFamily\"] for i in jsonfile[\"entries\"]] return cls(name,entries,appFamily,id,references,**kwargs) def", "[i[\"app\"] for i in jsonfile[\"entries\"]] else: if not jsonfile[\"entries\"][0].get(\"appFamily\"): return", "def from_json(cls,jsonfile,**kwargs): id = jsonfile[\"listId\"] name = jsonfile[\"name\"] references =", "i in jsonfile[\"entries\"]] return cls(name,entries,appFamily,id,references,**kwargs) def to_json(self): return { \"name\":self.name,", "self.references = reference self.app_family=is_app_family self._entries = app_list self.url = \"template/policy/list/app\"", "self.app_family=is_app_family self._entries = app_list self.url = \"template/policy/list/app\" super().__init__(**kwargs) self.modified=False def", "for i in jsonfile[\"entries\"]] return cls(name,entries,appFamily,id,references,**kwargs) def to_json(self): return {", "jsonfile.get(\"references\") if len(jsonfile[\"entries\"])>0 and jsonfile[\"entries\"][0].get(\"app\"): appFamily=False entries = [i[\"app\"] for", "name = jsonfile[\"name\"] references = jsonfile.get(\"references\") if len(jsonfile[\"entries\"])>0 and jsonfile[\"entries\"][0].get(\"app\"):", "self.modified=True self._entries=entries @classmethod def from_json(cls,jsonfile,**kwargs): id = jsonfile[\"listId\"] name =", "super().__init__(**kwargs) self.modified=False def get_entries(self): return self._entries def set_entries(self,entries): self.modified=True self._entries=entries", "app_list self.url = \"template/policy/list/app\" super().__init__(**kwargs) self.modified=False def get_entries(self): return self._entries", "Required\", \"type\":\"app\", \"entries\":[ {\"appFamily\" if self.app_family else \"app\":i} for i", "def to_json(self): return { \"name\":self.name, \"description\":\"Desc Not Required\", \"type\":\"app\", \"entries\":[", "from cisco_sdwan_policy.BaseObject import BaseObject class Application(BaseObject): def __init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs): self.type =", "self._entries def set_entries(self,entries): self.modified=True self._entries=entries @classmethod def from_json(cls,jsonfile,**kwargs): id =", "cisco_sdwan_policy.BaseObject import BaseObject class Application(BaseObject): def __init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs): self.type = \"appList\"", "in jsonfile[\"entries\"]] else: if not jsonfile[\"entries\"][0].get(\"appFamily\"): return None else: appFamily=True", "self.modified=False def get_entries(self): return self._entries def set_entries(self,entries): self.modified=True self._entries=entries @classmethod", "import BaseObject class Application(BaseObject): def __init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs): self.type = \"appList\" self.id", "= [i[\"app\"] for i in jsonfile[\"entries\"]] else: if not jsonfile[\"entries\"][0].get(\"appFamily\"):", "[i[\"appFamily\"] for i in jsonfile[\"entries\"]] return cls(name,entries,appFamily,id,references,**kwargs) def to_json(self): return", "get_entries(self): return self._entries def set_entries(self,entries): self.modified=True self._entries=entries @classmethod def from_json(cls,jsonfile,**kwargs):", "else: if not jsonfile[\"entries\"][0].get(\"appFamily\"): return None else: appFamily=True entries =", 
"appFamily=False entries = [i[\"app\"] for i in jsonfile[\"entries\"]] else: if", "if len(jsonfile[\"entries\"])>0 and jsonfile[\"entries\"][0].get(\"app\"): appFamily=False entries = [i[\"app\"] for i", "for i in jsonfile[\"entries\"]] else: if not jsonfile[\"entries\"][0].get(\"appFamily\"): return None", "in jsonfile[\"entries\"]] return cls(name,entries,appFamily,id,references,**kwargs) def to_json(self): return { \"name\":self.name, \"description\":\"Desc", "def __init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs): self.type = \"appList\" self.id = id self.name =", "= \"appList\" self.id = id self.name = name self.references =", "{ \"name\":self.name, \"description\":\"Desc Not Required\", \"type\":\"app\", \"entries\":[ {\"appFamily\" if self.app_family", "entries = [i[\"app\"] for i in jsonfile[\"entries\"]] else: if not", "def get_entries(self): return self._entries def set_entries(self,entries): self.modified=True self._entries=entries @classmethod def" ]
[ "return # Cleanup self._stop() # Create & Run container docker_container", "& Run container docker_container = self.sys_docker.run( self.image, tag=self.sys_plugins.dns.version.string, init=False, dns=False,", "of Docker container.\"\"\" return DNS_DOCKER_NAME def _run(self) -> None: \"\"\"Run", "..coresys import CoreSysAttributes from .interface import DockerInterface _LOGGER: logging.Logger =", "{\"bind\": \"/config\", \"mode\": \"rw\"} }, ) self._meta = docker_container.attrs _LOGGER.info(", "\"\"\"Return name of Supervisor DNS image.\"\"\" return self.sys_plugins.dns.image @property def", "container.\"\"\" return DNS_DOCKER_NAME def _run(self) -> None: \"\"\"Run Docker image.", "\"-\"), detach=True, environment={ENV_TIME: self.sys_config.timezone}, volumes={ str(self.sys_config.path_extern_dns): {\"bind\": \"/config\", \"mode\": \"rw\"}", "str: \"\"\"Return name of Docker container.\"\"\" return DNS_DOCKER_NAME def _run(self)", "\"rw\"} }, ) self._meta = docker_container.attrs _LOGGER.info( \"Starting DNS %s", "dns=False, ipv4=self.sys_docker.network.dns, name=self.name, hostname=self.name.replace(\"_\", \"-\"), detach=True, environment={ENV_TIME: self.sys_config.timezone}, volumes={ str(self.sys_config.path_extern_dns):", "init=False, dns=False, ipv4=self.sys_docker.network.dns, name=self.name, hostname=self.name.replace(\"_\", \"-\"), detach=True, environment={ENV_TIME: self.sys_config.timezone}, volumes={", "import DockerInterface _LOGGER: logging.Logger = logging.getLogger(__name__) DNS_DOCKER_NAME: str = \"hassio_dns\"", "Supervisor wrapper for Supervisor DNS.\"\"\" @property def image(self) -> str:", "for Supervisor DNS.\"\"\" @property def image(self) -> str: \"\"\"Return name", "DockerInterface _LOGGER: logging.Logger = logging.getLogger(__name__) DNS_DOCKER_NAME: str = \"hassio_dns\" class", "name of Docker container.\"\"\" return DNS_DOCKER_NAME def _run(self) -> None:", "name of Supervisor DNS image.\"\"\" return self.sys_plugins.dns.image @property def name(self)", "DNS.\"\"\" @property def image(self) -> str: \"\"\"Return name of Supervisor", "self._is_running(): return # Cleanup self._stop() # Create & Run container", "_LOGGER: logging.Logger = logging.getLogger(__name__) DNS_DOCKER_NAME: str = \"hassio_dns\" class DockerDNS(DockerInterface,", "from ..const import ENV_TIME from ..coresys import CoreSysAttributes from .interface", "logging.getLogger(__name__) DNS_DOCKER_NAME: str = \"hassio_dns\" class DockerDNS(DockerInterface, CoreSysAttributes): \"\"\"Docker Supervisor", "name=self.name, hostname=self.name.replace(\"_\", \"-\"), detach=True, environment={ENV_TIME: self.sys_config.timezone}, volumes={ str(self.sys_config.path_extern_dns): {\"bind\": \"/config\",", "self.sys_plugins.dns.image @property def name(self) -> str: \"\"\"Return name of Docker", "Create & Run container docker_container = self.sys_docker.run( self.image, tag=self.sys_plugins.dns.version.string, init=False,", "if self._is_running(): return # Cleanup self._stop() # Create & Run", "ipv4=self.sys_docker.network.dns, name=self.name, hostname=self.name.replace(\"_\", \"-\"), detach=True, environment={ENV_TIME: self.sys_config.timezone}, volumes={ str(self.sys_config.path_extern_dns): {\"bind\":", "of Supervisor DNS image.\"\"\" return self.sys_plugins.dns.image @property def name(self) ->", "\"mode\": \"rw\"} }, ) self._meta = docker_container.attrs _LOGGER.info( \"Starting DNS", "@property def name(self) -> str: \"\"\"Return name of Docker container.\"\"\"", "CoreSysAttributes): \"\"\"Docker 
Supervisor wrapper for Supervisor DNS.\"\"\" @property def image(self)", "\"/config\", \"mode\": \"rw\"} }, ) self._meta = docker_container.attrs _LOGGER.info( \"Starting", "\"\"\"Run Docker image. Need run inside executor. \"\"\" if self._is_running():", "-> None: \"\"\"Run Docker image. Need run inside executor. \"\"\"", "= logging.getLogger(__name__) DNS_DOCKER_NAME: str = \"hassio_dns\" class DockerDNS(DockerInterface, CoreSysAttributes): \"\"\"Docker", "= \"hassio_dns\" class DockerDNS(DockerInterface, CoreSysAttributes): \"\"\"Docker Supervisor wrapper for Supervisor", "run inside executor. \"\"\" if self._is_running(): return # Cleanup self._stop()", "# Cleanup self._stop() # Create & Run container docker_container =", "Supervisor DNS image.\"\"\" return self.sys_plugins.dns.image @property def name(self) -> str:", "= docker_container.attrs _LOGGER.info( \"Starting DNS %s with version %s -", "Docker container.\"\"\" return DNS_DOCKER_NAME def _run(self) -> None: \"\"\"Run Docker", "# Create & Run container docker_container = self.sys_docker.run( self.image, tag=self.sys_plugins.dns.version.string,", "str(self.sys_config.path_extern_dns): {\"bind\": \"/config\", \"mode\": \"rw\"} }, ) self._meta = docker_container.attrs", "image.\"\"\" return self.sys_plugins.dns.image @property def name(self) -> str: \"\"\"Return name", "DNS image.\"\"\" return self.sys_plugins.dns.image @property def name(self) -> str: \"\"\"Return", "DNS %s with version %s - %s\", self.image, self.version, self.sys_docker.network.dns,", "CoreSysAttributes from .interface import DockerInterface _LOGGER: logging.Logger = logging.getLogger(__name__) DNS_DOCKER_NAME:", "detach=True, environment={ENV_TIME: self.sys_config.timezone}, volumes={ str(self.sys_config.path_extern_dns): {\"bind\": \"/config\", \"mode\": \"rw\"} },", "ENV_TIME from ..coresys import CoreSysAttributes from .interface import DockerInterface _LOGGER:", "wrapper for Supervisor DNS.\"\"\" @property def image(self) -> str: \"\"\"Return", "DNS_DOCKER_NAME def _run(self) -> None: \"\"\"Run Docker image. Need run", "_run(self) -> None: \"\"\"Run Docker image. Need run inside executor.", "\"Starting DNS %s with version %s - %s\", self.image, self.version,", "\"\"\"Return name of Docker container.\"\"\" return DNS_DOCKER_NAME def _run(self) ->", "import ENV_TIME from ..coresys import CoreSysAttributes from .interface import DockerInterface", "class DockerDNS(DockerInterface, CoreSysAttributes): \"\"\"Docker Supervisor wrapper for Supervisor DNS.\"\"\" @property", "_LOGGER.info( \"Starting DNS %s with version %s - %s\", self.image,", "\"hassio_dns\" class DockerDNS(DockerInterface, CoreSysAttributes): \"\"\"Docker Supervisor wrapper for Supervisor DNS.\"\"\"", "return DNS_DOCKER_NAME def _run(self) -> None: \"\"\"Run Docker image. Need", "executor. 
\"\"\" if self._is_running(): return # Cleanup self._stop() # Create", "def name(self) -> str: \"\"\"Return name of Docker container.\"\"\" return", "logging from ..const import ENV_TIME from ..coresys import CoreSysAttributes from", "docker_container = self.sys_docker.run( self.image, tag=self.sys_plugins.dns.version.string, init=False, dns=False, ipv4=self.sys_docker.network.dns, name=self.name, hostname=self.name.replace(\"_\",", "\"\"\"Docker Supervisor wrapper for Supervisor DNS.\"\"\" @property def image(self) ->", "docker object.\"\"\" import logging from ..const import ENV_TIME from ..coresys", "from ..coresys import CoreSysAttributes from .interface import DockerInterface _LOGGER: logging.Logger", "str = \"hassio_dns\" class DockerDNS(DockerInterface, CoreSysAttributes): \"\"\"Docker Supervisor wrapper for", "self.sys_config.timezone}, volumes={ str(self.sys_config.path_extern_dns): {\"bind\": \"/config\", \"mode\": \"rw\"} }, ) self._meta", "def _run(self) -> None: \"\"\"Run Docker image. Need run inside", "docker_container.attrs _LOGGER.info( \"Starting DNS %s with version %s - %s\",", "Run container docker_container = self.sys_docker.run( self.image, tag=self.sys_plugins.dns.version.string, init=False, dns=False, ipv4=self.sys_docker.network.dns,", "name(self) -> str: \"\"\"Return name of Docker container.\"\"\" return DNS_DOCKER_NAME", "object.\"\"\" import logging from ..const import ENV_TIME from ..coresys import", "self.image, tag=self.sys_plugins.dns.version.string, init=False, dns=False, ipv4=self.sys_docker.network.dns, name=self.name, hostname=self.name.replace(\"_\", \"-\"), detach=True, environment={ENV_TIME:", "hostname=self.name.replace(\"_\", \"-\"), detach=True, environment={ENV_TIME: self.sys_config.timezone}, volumes={ str(self.sys_config.path_extern_dns): {\"bind\": \"/config\", \"mode\":", "}, ) self._meta = docker_container.attrs _LOGGER.info( \"Starting DNS %s with", "Docker image. Need run inside executor. \"\"\" if self._is_running(): return", "image. Need run inside executor. \"\"\" if self._is_running(): return #", "import logging from ..const import ENV_TIME from ..coresys import CoreSysAttributes", "tag=self.sys_plugins.dns.version.string, init=False, dns=False, ipv4=self.sys_docker.network.dns, name=self.name, hostname=self.name.replace(\"_\", \"-\"), detach=True, environment={ENV_TIME: self.sys_config.timezone},", "def image(self) -> str: \"\"\"Return name of Supervisor DNS image.\"\"\"", "Supervisor DNS.\"\"\" @property def image(self) -> str: \"\"\"Return name of", "str: \"\"\"Return name of Supervisor DNS image.\"\"\" return self.sys_plugins.dns.image @property", "self._meta = docker_container.attrs _LOGGER.info( \"Starting DNS %s with version %s", "volumes={ str(self.sys_config.path_extern_dns): {\"bind\": \"/config\", \"mode\": \"rw\"} }, ) self._meta =", "None: \"\"\"Run Docker image. Need run inside executor. 
\"\"\" if", "\"\"\" if self._is_running(): return # Cleanup self._stop() # Create &", "from .interface import DockerInterface _LOGGER: logging.Logger = logging.getLogger(__name__) DNS_DOCKER_NAME: str", ".interface import DockerInterface _LOGGER: logging.Logger = logging.getLogger(__name__) DNS_DOCKER_NAME: str =", "@property def image(self) -> str: \"\"\"Return name of Supervisor DNS", "return self.sys_plugins.dns.image @property def name(self) -> str: \"\"\"Return name of", "\"\"\"DNS docker object.\"\"\" import logging from ..const import ENV_TIME from", "%s with version %s - %s\", self.image, self.version, self.sys_docker.network.dns, )", "DockerDNS(DockerInterface, CoreSysAttributes): \"\"\"Docker Supervisor wrapper for Supervisor DNS.\"\"\" @property def", "..const import ENV_TIME from ..coresys import CoreSysAttributes from .interface import", "Need run inside executor. \"\"\" if self._is_running(): return # Cleanup", "self._stop() # Create & Run container docker_container = self.sys_docker.run( self.image,", "DNS_DOCKER_NAME: str = \"hassio_dns\" class DockerDNS(DockerInterface, CoreSysAttributes): \"\"\"Docker Supervisor wrapper", "import CoreSysAttributes from .interface import DockerInterface _LOGGER: logging.Logger = logging.getLogger(__name__)", "inside executor. \"\"\" if self._is_running(): return # Cleanup self._stop() #", "container docker_container = self.sys_docker.run( self.image, tag=self.sys_plugins.dns.version.string, init=False, dns=False, ipv4=self.sys_docker.network.dns, name=self.name,", "<filename>supervisor/docker/dns.py \"\"\"DNS docker object.\"\"\" import logging from ..const import ENV_TIME", "logging.Logger = logging.getLogger(__name__) DNS_DOCKER_NAME: str = \"hassio_dns\" class DockerDNS(DockerInterface, CoreSysAttributes):", "Cleanup self._stop() # Create & Run container docker_container = self.sys_docker.run(", ") self._meta = docker_container.attrs _LOGGER.info( \"Starting DNS %s with version", "self.sys_docker.run( self.image, tag=self.sys_plugins.dns.version.string, init=False, dns=False, ipv4=self.sys_docker.network.dns, name=self.name, hostname=self.name.replace(\"_\", \"-\"), detach=True,", "environment={ENV_TIME: self.sys_config.timezone}, volumes={ str(self.sys_config.path_extern_dns): {\"bind\": \"/config\", \"mode\": \"rw\"} }, )", "= self.sys_docker.run( self.image, tag=self.sys_plugins.dns.version.string, init=False, dns=False, ipv4=self.sys_docker.network.dns, name=self.name, hostname=self.name.replace(\"_\", \"-\"),", "-> str: \"\"\"Return name of Supervisor DNS image.\"\"\" return self.sys_plugins.dns.image", "-> str: \"\"\"Return name of Docker container.\"\"\" return DNS_DOCKER_NAME def", "image(self) -> str: \"\"\"Return name of Supervisor DNS image.\"\"\" return" ]
[ "python_version >= 350: binary_operator_codes[\"MatMult\"] = \"PyNumber_MatrixMultiply\" binary_operator_codes[\"IMatMult\"] = \"PyNumber_InPlaceMatrixMultiply\" unary_operator_codes", "\"IMod\": \"PyNumber_InPlaceRemainder\", \"ILShift\": \"PyNumber_InPlaceLshift\", \"IRShift\": \"PyNumber_InPlaceRshift\", \"IBitAnd\": \"PyNumber_InPlaceAnd\", \"IBitOr\": \"PyNumber_InPlaceOr\",", "2.0 (the \"License\"); # you may not use this file", "permissions and # limitations under the License. # \"\"\" Operator", "from nuitka.PythonVersions import python_version binary_operator_codes = { # Those commented", "\"PyNumber_InPlaceXor\", } # Python 3.5 only operator if python_version >=", "optimizing Python compiler that is compatible and # integrates with", "\"IDiv\": \"PyNumber_InPlaceDivide\", \"IFloorDiv\": \"PyNumber_InPlaceFloorDivide\", \"ITrueDiv\": \"PyNumber_InPlaceTrueDivide\", \"IMod\": \"PyNumber_InPlaceRemainder\", \"ILShift\": \"PyNumber_InPlaceLshift\",", "0), } rich_comparison_codes = { \"Lt\": \"LT\", \"LtE\": \"LE\", \"Eq\":", "\"IBitOr\": \"PyNumber_InPlaceOr\", \"IBitXor\": \"PyNumber_InPlaceXor\", } # Python 3.5 only operator", "# Python 3.5 only operator if python_version >= 350: binary_operator_codes[\"MatMult\"]", "\"PyNumber_TrueDivide\", # These have their own variants only to make", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "\"RShift\": \"PyNumber_Rshift\", \"BitAnd\": \"PyNumber_And\", \"BitOr\": \"PyNumber_Or\", \"BitXor\": \"PyNumber_Xor\", \"IAdd\": \"PyNumber_InPlaceAdd\",", "\"PyNumber_Divide\", # \"Mult\" : \"PyNumber_Multiply\", # \"Mod\" : \"PyNumber_Remainder\", #", "generic code is in-lined # but the CPython code is", "Operator code tables These are mostly used to look up", "(\"PyNumber_Negative\", 1), \"Invert\": (\"PyNumber_Invert\", 1), \"Repr\": (\"PyObject_Repr\", 1), \"Not\": (\"UNARY_NOT\",", "are mostly used to look up the Python C/API from", "\"LShift\": \"PyNumber_Lshift\", \"RShift\": \"PyNumber_Rshift\", \"BitAnd\": \"PyNumber_And\", \"BitOr\": \"PyNumber_Or\", \"BitXor\": \"PyNumber_Xor\",", "# # Part of \"Nuitka\", an optimizing Python compiler that", "# \"Pow\" : \"PyNumber_Power\", # \"IPow\" : \"PyNumber_InPlacePower\", # The", "use this file except in compliance with the License. #", "\"PyNumber_InPlaceLshift\", \"IRShift\": \"PyNumber_InPlaceRshift\", \"IBitAnd\": \"PyNumber_InPlaceAnd\", \"IBitOr\": \"PyNumber_InPlaceOr\", \"IBitXor\": \"PyNumber_InPlaceXor\", }", "1), \"Not\": (\"UNARY_NOT\", 0), } rich_comparison_codes = { \"Lt\": \"LT\",", "if they had a specialized variant too. \"LShift\": \"PyNumber_Lshift\", \"RShift\":", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "\"BitOr\": \"PyNumber_Or\", \"BitXor\": \"PyNumber_Xor\", \"IAdd\": \"PyNumber_InPlaceAdd\", \"ISub\": \"PyNumber_InPlaceSubtract\", \"IMult\": \"PyNumber_InPlaceMultiply\",", "License. 
# You may obtain a copy of the License", "\"PyNumber_Divide\", # \"FloorDiv\" : \"PyNumber_FloorDivide\", # \"TrueDiv\" : \"PyNumber_TrueDivide\", #", "under the License is distributed on an \"AS IS\" BASIS,", "\"ILShift\": \"PyNumber_InPlaceLshift\", \"IRShift\": \"PyNumber_InPlaceRshift\", \"IBitAnd\": \"PyNumber_InPlaceAnd\", \"IBitOr\": \"PyNumber_InPlaceOr\", \"IBitXor\": \"PyNumber_InPlaceXor\",", "License for the specific language governing permissions and # limitations", "\"PyNumber_FloorDivide\", # \"TrueDiv\" : \"PyNumber_TrueDivide\", # These have their own", "\"PyNumber_InPlaceAnd\", \"IBitOr\": \"PyNumber_InPlaceOr\", \"IBitXor\": \"PyNumber_InPlaceXor\", } # Python 3.5 only", "\"IRShift\": \"PyNumber_InPlaceRshift\", \"IBitAnd\": \"PyNumber_InPlaceAnd\", \"IBitOr\": \"PyNumber_InPlaceOr\", \"IBitXor\": \"PyNumber_InPlaceXor\", } #", "own variants only to make sure the generic code is", "\"LE\", \"Eq\": \"EQ\", \"NotEq\": \"NE\", \"Gt\": \"GT\", \"GtE\": \"GE\", }", "= \"PyNumber_InPlaceMatrixMultiply\" unary_operator_codes = { \"UAdd\": (\"PyNumber_Positive\", 1), \"USub\": (\"PyNumber_Negative\",", "sure the generic code is in-lined # but the CPython", "# limitations under the License. # \"\"\" Operator code tables", "and # integrates with CPython, but also works on its", "\"PyNumber_InPlaceSubtract\", \"IMult\": \"PyNumber_InPlaceMultiply\", \"IDiv\": \"PyNumber_InPlaceDivide\", \"IFloorDiv\": \"PyNumber_InPlaceFloorDivide\", \"ITrueDiv\": \"PyNumber_InPlaceTrueDivide\", \"IMod\":", "\"PyNumber_Rshift\", \"BitAnd\": \"PyNumber_And\", \"BitOr\": \"PyNumber_Or\", \"BitXor\": \"PyNumber_Xor\", \"IAdd\": \"PyNumber_InPlaceAdd\", \"ISub\":", "is compatible and # integrates with CPython, but also works", "on its own. # # Licensed under the Apache License,", "too. \"LShift\": \"PyNumber_Lshift\", \"RShift\": \"PyNumber_Rshift\", \"BitAnd\": \"PyNumber_And\", \"BitOr\": \"PyNumber_Or\", \"BitXor\":", "{ \"Lt\": \"LT\", \"LtE\": \"LE\", \"Eq\": \"EQ\", \"NotEq\": \"NE\", \"Gt\":", "used to look up the Python C/API from operations or", "\"IAdd\": \"PyNumber_InPlaceAdd\", \"ISub\": \"PyNumber_InPlaceSubtract\", \"IMult\": \"PyNumber_InPlaceMultiply\", \"IDiv\": \"PyNumber_InPlaceDivide\", \"IFloorDiv\": \"PyNumber_InPlaceFloorDivide\",", "# integrates with CPython, but also works on its own.", "in compliance with the License. # You may obtain a", "# \"Mult\" : \"PyNumber_Multiply\", # \"Mod\" : \"PyNumber_Remainder\", # \"Div\"", "software # distributed under the License is distributed on an", "had a specialized variant too. \"LShift\": \"PyNumber_Lshift\", \"RShift\": \"PyNumber_Rshift\", \"BitAnd\":", "the Python C/API from operations or a wrapper used. \"\"\"", "but the CPython code is not in-lined. # \"Pow\" :", "\"EQ\", \"NotEq\": \"NE\", \"Gt\": \"GT\", \"GtE\": \"GE\", } containing_comparison_codes =", "is in-lined # but the CPython code is not in-lined.", "\"Div\" : \"PyNumber_Divide\", # \"Mult\" : \"PyNumber_Multiply\", # \"Mod\" :", "unary_operator_codes = { \"UAdd\": (\"PyNumber_Positive\", 1), \"USub\": (\"PyNumber_Negative\", 1), \"Invert\":", "\"PyNumber_And\", \"BitOr\": \"PyNumber_Or\", \"BitXor\": \"PyNumber_Xor\", \"IAdd\": \"PyNumber_InPlaceAdd\", \"ISub\": \"PyNumber_InPlaceSubtract\", \"IMult\":", "Python C/API from operations or a wrapper used. \"\"\" from", "section have fully specialized variants already. 
# \"Add\" : \"PyNumber_Add\",", "mailto:<EMAIL> # # Part of \"Nuitka\", an optimizing Python compiler", ": \"PyNumber_InPlacePower\", # The others are generic code and would", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", ">= 350: binary_operator_codes[\"MatMult\"] = \"PyNumber_MatrixMultiply\" binary_operator_codes[\"IMatMult\"] = \"PyNumber_InPlaceMatrixMultiply\" unary_operator_codes =", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "# Those commented out in this section have fully specialized", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", ": \"PyNumber_Add\", # \"Sub\" : \"PyNumber_Subtract\", # \"Div\" : \"PyNumber_Divide\",", "to in writing, software # distributed under the License is", "\"Mult\" : \"PyNumber_Multiply\", # \"Mod\" : \"PyNumber_Remainder\", # \"Div\" :", "# See the License for the specific language governing permissions", ": \"PyNumber_Remainder\", # \"Div\" : \"PyNumber_Divide\", # \"FloorDiv\" : \"PyNumber_FloorDivide\",", "language governing permissions and # limitations under the License. #", "= \"PyNumber_MatrixMultiply\" binary_operator_codes[\"IMatMult\"] = \"PyNumber_InPlaceMatrixMultiply\" unary_operator_codes = { \"UAdd\": (\"PyNumber_Positive\",", "or agreed to in writing, software # distributed under the", "2019, <NAME>, mailto:<EMAIL> # # Part of \"Nuitka\", an optimizing", "\"Pow\" : \"PyNumber_Power\", # \"IPow\" : \"PyNumber_InPlacePower\", # The others", "required by applicable law or agreed to in writing, software", "\"Nuitka\", an optimizing Python compiler that is compatible and #", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "only operator if python_version >= 350: binary_operator_codes[\"MatMult\"] = \"PyNumber_MatrixMultiply\" binary_operator_codes[\"IMatMult\"]", "with the License. # You may obtain a copy of", ": \"PyNumber_TrueDivide\", # These have their own variants only to", "a wrapper used. \"\"\" from nuitka.PythonVersions import python_version binary_operator_codes =", "= { \"UAdd\": (\"PyNumber_Positive\", 1), \"USub\": (\"PyNumber_Negative\", 1), \"Invert\": (\"PyNumber_Invert\",", "CPython, but also works on its own. # # Licensed", "C/API from operations or a wrapper used. \"\"\" from nuitka.PythonVersions", "compliance with the License. # You may obtain a copy", "agreed to in writing, software # distributed under the License", "tables These are mostly used to look up the Python", "distributed under the License is distributed on an \"AS IS\"", "\"PyNumber_InPlaceMultiply\", \"IDiv\": \"PyNumber_InPlaceDivide\", \"IFloorDiv\": \"PyNumber_InPlaceFloorDivide\", \"ITrueDiv\": \"PyNumber_InPlaceTrueDivide\", \"IMod\": \"PyNumber_InPlaceRemainder\", \"ILShift\":", "their own variants only to make sure the generic code", "\"ISub\": \"PyNumber_InPlaceSubtract\", \"IMult\": \"PyNumber_InPlaceMultiply\", \"IDiv\": \"PyNumber_InPlaceDivide\", \"IFloorDiv\": \"PyNumber_InPlaceFloorDivide\", \"ITrueDiv\": \"PyNumber_InPlaceTrueDivide\",", "express or implied. # See the License for the specific", "fully specialized variants already. # \"Add\" : \"PyNumber_Add\", # \"Sub\"", "except in compliance with the License. # You may obtain", "limitations under the License. 
# \"\"\" Operator code tables These", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "<gh_stars>0 # Copyright 2019, <NAME>, mailto:<EMAIL> # # Part of", "writing, software # distributed under the License is distributed on", "in this section have fully specialized variants already. # \"Add\"", "you may not use this file except in compliance with", "or a wrapper used. \"\"\" from nuitka.PythonVersions import python_version binary_operator_codes", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "1), \"Repr\": (\"PyObject_Repr\", 1), \"Not\": (\"UNARY_NOT\", 0), } rich_comparison_codes =", "integrates with CPython, but also works on its own. #", "# \"TrueDiv\" : \"PyNumber_TrueDivide\", # These have their own variants", "are generic code and would be faster if they had", "own. # # Licensed under the Apache License, Version 2.0", "CONDITIONS OF ANY KIND, either express or implied. # See", "the CPython code is not in-lined. # \"Pow\" : \"PyNumber_Power\",", "\"PyNumber_Subtract\", # \"Div\" : \"PyNumber_Divide\", # \"Mult\" : \"PyNumber_Multiply\", #", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "up the Python C/API from operations or a wrapper used.", "wrapper used. \"\"\" from nuitka.PythonVersions import python_version binary_operator_codes = {", "\"UAdd\": (\"PyNumber_Positive\", 1), \"USub\": (\"PyNumber_Negative\", 1), \"Invert\": (\"PyNumber_Invert\", 1), \"Repr\":", "\"IBitXor\": \"PyNumber_InPlaceXor\", } # Python 3.5 only operator if python_version", "also works on its own. # # Licensed under the", "\"Invert\": (\"PyNumber_Invert\", 1), \"Repr\": (\"PyObject_Repr\", 1), \"Not\": (\"UNARY_NOT\", 0), }", "a specialized variant too. \"LShift\": \"PyNumber_Lshift\", \"RShift\": \"PyNumber_Rshift\", \"BitAnd\": \"PyNumber_And\",", "works on its own. # # Licensed under the Apache", "the generic code is in-lined # but the CPython code", "\"TrueDiv\" : \"PyNumber_TrueDivide\", # These have their own variants only", "operator if python_version >= 350: binary_operator_codes[\"MatMult\"] = \"PyNumber_MatrixMultiply\" binary_operator_codes[\"IMatMult\"] =", "nuitka.PythonVersions import python_version binary_operator_codes = { # Those commented out", "# Copyright 2019, <NAME>, mailto:<EMAIL> # # Part of \"Nuitka\",", "{ \"UAdd\": (\"PyNumber_Positive\", 1), \"USub\": (\"PyNumber_Negative\", 1), \"Invert\": (\"PyNumber_Invert\", 1),", "OR CONDITIONS OF ANY KIND, either express or implied. #", "} rich_comparison_codes = { \"Lt\": \"LT\", \"LtE\": \"LE\", \"Eq\": \"EQ\",", "\"BitAnd\": \"PyNumber_And\", \"BitOr\": \"PyNumber_Or\", \"BitXor\": \"PyNumber_Xor\", \"IAdd\": \"PyNumber_InPlaceAdd\", \"ISub\": \"PyNumber_InPlaceSubtract\",", "the License is distributed on an \"AS IS\" BASIS, #", "code is not in-lined. # \"Pow\" : \"PyNumber_Power\", # \"IPow\"", "\"PyNumber_MatrixMultiply\" binary_operator_codes[\"IMatMult\"] = \"PyNumber_InPlaceMatrixMultiply\" unary_operator_codes = { \"UAdd\": (\"PyNumber_Positive\", 1),", "governing permissions and # limitations under the License. # \"\"\"", "\"FloorDiv\" : \"PyNumber_FloorDivide\", # \"TrueDiv\" : \"PyNumber_TrueDivide\", # These have", "that is compatible and # integrates with CPython, but also", "law or agreed to in writing, software # distributed under", "} # Python 3.5 only operator if python_version >= 350:", "the License. 
# \"\"\" Operator code tables These are mostly", "\"LT\", \"LtE\": \"LE\", \"Eq\": \"EQ\", \"NotEq\": \"NE\", \"Gt\": \"GT\", \"GtE\":", "\"PyNumber_InPlaceRshift\", \"IBitAnd\": \"PyNumber_InPlaceAnd\", \"IBitOr\": \"PyNumber_InPlaceOr\", \"IBitXor\": \"PyNumber_InPlaceXor\", } # Python", "\"IBitAnd\": \"PyNumber_InPlaceAnd\", \"IBitOr\": \"PyNumber_InPlaceOr\", \"IBitXor\": \"PyNumber_InPlaceXor\", } # Python 3.5", "an optimizing Python compiler that is compatible and # integrates", "\"PyNumber_InPlaceOr\", \"IBitXor\": \"PyNumber_InPlaceXor\", } # Python 3.5 only operator if", "= { # Those commented out in this section have", "rich_comparison_codes = { \"Lt\": \"LT\", \"LtE\": \"LE\", \"Eq\": \"EQ\", \"NotEq\":", "commented out in this section have fully specialized variants already.", "may obtain a copy of the License at # #", "\"PyNumber_InPlaceAdd\", \"ISub\": \"PyNumber_InPlaceSubtract\", \"IMult\": \"PyNumber_InPlaceMultiply\", \"IDiv\": \"PyNumber_InPlaceDivide\", \"IFloorDiv\": \"PyNumber_InPlaceFloorDivide\", \"ITrueDiv\":", "(\"PyObject_Repr\", 1), \"Not\": (\"UNARY_NOT\", 0), } rich_comparison_codes = { \"Lt\":", "# \"Div\" : \"PyNumber_Divide\", # \"FloorDiv\" : \"PyNumber_FloorDivide\", # \"TrueDiv\"", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "\"PyNumber_InPlaceDivide\", \"IFloorDiv\": \"PyNumber_InPlaceFloorDivide\", \"ITrueDiv\": \"PyNumber_InPlaceTrueDivide\", \"IMod\": \"PyNumber_InPlaceRemainder\", \"ILShift\": \"PyNumber_InPlaceLshift\", \"IRShift\":", "may not use this file except in compliance with the", ": \"PyNumber_FloorDivide\", # \"TrueDiv\" : \"PyNumber_TrueDivide\", # These have their", "\"PyNumber_Or\", \"BitXor\": \"PyNumber_Xor\", \"IAdd\": \"PyNumber_InPlaceAdd\", \"ISub\": \"PyNumber_InPlaceSubtract\", \"IMult\": \"PyNumber_InPlaceMultiply\", \"IDiv\":", "1), \"Invert\": (\"PyNumber_Invert\", 1), \"Repr\": (\"PyObject_Repr\", 1), \"Not\": (\"UNARY_NOT\", 0),", "\"PyNumber_InPlaceRemainder\", \"ILShift\": \"PyNumber_InPlaceLshift\", \"IRShift\": \"PyNumber_InPlaceRshift\", \"IBitAnd\": \"PyNumber_InPlaceAnd\", \"IBitOr\": \"PyNumber_InPlaceOr\", \"IBitXor\":", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "and # limitations under the License. # \"\"\" Operator code", "this file except in compliance with the License. # You", "compiler that is compatible and # integrates with CPython, but", "to make sure the generic code is in-lined # but", "if python_version >= 350: binary_operator_codes[\"MatMult\"] = \"PyNumber_MatrixMultiply\" binary_operator_codes[\"IMatMult\"] = \"PyNumber_InPlaceMatrixMultiply\"", "from operations or a wrapper used. \"\"\" from nuitka.PythonVersions import", "\"NE\", \"Gt\": \"GT\", \"GtE\": \"GE\", } containing_comparison_codes = (\"In\", \"NotIn\")", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "# # Licensed under the Apache License, Version 2.0 (the", "License. # \"\"\" Operator code tables These are mostly used", "{ # Those commented out in this section have fully", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "\"PyNumber_Lshift\", \"RShift\": \"PyNumber_Rshift\", \"BitAnd\": \"PyNumber_And\", \"BitOr\": \"PyNumber_Or\", \"BitXor\": \"PyNumber_Xor\", \"IAdd\":", "this section have fully specialized variants already. 
# \"Add\" :", "# \"FloorDiv\" : \"PyNumber_FloorDivide\", # \"TrueDiv\" : \"PyNumber_TrueDivide\", # These", "mostly used to look up the Python C/API from operations", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", ": \"PyNumber_Subtract\", # \"Div\" : \"PyNumber_Divide\", # \"Mult\" : \"PyNumber_Multiply\",", "only to make sure the generic code is in-lined #", "in-lined. # \"Pow\" : \"PyNumber_Power\", # \"IPow\" : \"PyNumber_InPlacePower\", #", "they had a specialized variant too. \"LShift\": \"PyNumber_Lshift\", \"RShift\": \"PyNumber_Rshift\",", "compatible and # integrates with CPython, but also works on", "have their own variants only to make sure the generic", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "# \"Add\" : \"PyNumber_Add\", # \"Sub\" : \"PyNumber_Subtract\", # \"Div\"", "\"ITrueDiv\": \"PyNumber_InPlaceTrueDivide\", \"IMod\": \"PyNumber_InPlaceRemainder\", \"ILShift\": \"PyNumber_InPlaceLshift\", \"IRShift\": \"PyNumber_InPlaceRshift\", \"IBitAnd\": \"PyNumber_InPlaceAnd\",", "# \"\"\" Operator code tables These are mostly used to", "or implied. # See the License for the specific language", "variants only to make sure the generic code is in-lined", "specialized variant too. \"LShift\": \"PyNumber_Lshift\", \"RShift\": \"PyNumber_Rshift\", \"BitAnd\": \"PyNumber_And\", \"BitOr\":", "Python 3.5 only operator if python_version >= 350: binary_operator_codes[\"MatMult\"] =", "generic code and would be faster if they had a", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "in-lined # but the CPython code is not in-lined. #", "specialized variants already. # \"Add\" : \"PyNumber_Add\", # \"Sub\" :", "python_version binary_operator_codes = { # Those commented out in this", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "would be faster if they had a specialized variant too.", "# The others are generic code and would be faster", "(the \"License\"); # you may not use this file except", "# you may not use this file except in compliance", "\"PyNumber_InPlacePower\", # The others are generic code and would be", ": \"PyNumber_Divide\", # \"Mult\" : \"PyNumber_Multiply\", # \"Mod\" : \"PyNumber_Remainder\",", "\"Not\": (\"UNARY_NOT\", 0), } rich_comparison_codes = { \"Lt\": \"LT\", \"LtE\":", "\"Sub\" : \"PyNumber_Subtract\", # \"Div\" : \"PyNumber_Divide\", # \"Mult\" :", "# \"Div\" : \"PyNumber_Divide\", # \"Mult\" : \"PyNumber_Multiply\", # \"Mod\"", "# # Unless required by applicable law or agreed to", "These have their own variants only to make sure the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "<NAME>, mailto:<EMAIL> # # Part of \"Nuitka\", an optimizing Python", "Version 2.0 (the \"License\"); # you may not use this", "Copyright 2019, <NAME>, mailto:<EMAIL> # # Part of \"Nuitka\", an", "code and would be faster if they had a specialized", "Those commented out in this section have fully specialized variants", "implied. 
# See the License for the specific language governing", "\"Add\" : \"PyNumber_Add\", # \"Sub\" : \"PyNumber_Subtract\", # \"Div\" :", "under the Apache License, Version 2.0 (the \"License\"); # you", "# \"IPow\" : \"PyNumber_InPlacePower\", # The others are generic code", "make sure the generic code is in-lined # but the", "\"PyNumber_Power\", # \"IPow\" : \"PyNumber_InPlacePower\", # The others are generic", "by applicable law or agreed to in writing, software #", "\"IFloorDiv\": \"PyNumber_InPlaceFloorDivide\", \"ITrueDiv\": \"PyNumber_InPlaceTrueDivide\", \"IMod\": \"PyNumber_InPlaceRemainder\", \"ILShift\": \"PyNumber_InPlaceLshift\", \"IRShift\": \"PyNumber_InPlaceRshift\",", "code tables These are mostly used to look up the", "binary_operator_codes[\"IMatMult\"] = \"PyNumber_InPlaceMatrixMultiply\" unary_operator_codes = { \"UAdd\": (\"PyNumber_Positive\", 1), \"USub\":", "These are mostly used to look up the Python C/API", ": \"PyNumber_Divide\", # \"FloorDiv\" : \"PyNumber_FloorDivide\", # \"TrueDiv\" : \"PyNumber_TrueDivide\",", "\"PyNumber_Add\", # \"Sub\" : \"PyNumber_Subtract\", # \"Div\" : \"PyNumber_Divide\", #", "with CPython, but also works on its own. # #", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "\"PyNumber_InPlaceFloorDivide\", \"ITrueDiv\": \"PyNumber_InPlaceTrueDivide\", \"IMod\": \"PyNumber_InPlaceRemainder\", \"ILShift\": \"PyNumber_InPlaceLshift\", \"IRShift\": \"PyNumber_InPlaceRshift\", \"IBitAnd\":", "Unless required by applicable law or agreed to in writing,", "(\"PyNumber_Invert\", 1), \"Repr\": (\"PyObject_Repr\", 1), \"Not\": (\"UNARY_NOT\", 0), } rich_comparison_codes", "\"BitXor\": \"PyNumber_Xor\", \"IAdd\": \"PyNumber_InPlaceAdd\", \"ISub\": \"PyNumber_InPlaceSubtract\", \"IMult\": \"PyNumber_InPlaceMultiply\", \"IDiv\": \"PyNumber_InPlaceDivide\",", "the specific language governing permissions and # limitations under the", "of \"Nuitka\", an optimizing Python compiler that is compatible and", "\"PyNumber_InPlaceTrueDivide\", \"IMod\": \"PyNumber_InPlaceRemainder\", \"ILShift\": \"PyNumber_InPlaceLshift\", \"IRShift\": \"PyNumber_InPlaceRshift\", \"IBitAnd\": \"PyNumber_InPlaceAnd\", \"IBitOr\":", "Part of \"Nuitka\", an optimizing Python compiler that is compatible", ": \"PyNumber_Power\", # \"IPow\" : \"PyNumber_InPlacePower\", # The others are", "applicable law or agreed to in writing, software # distributed", "binary_operator_codes[\"MatMult\"] = \"PyNumber_MatrixMultiply\" binary_operator_codes[\"IMatMult\"] = \"PyNumber_InPlaceMatrixMultiply\" unary_operator_codes = { \"UAdd\":", "\"NotEq\": \"NE\", \"Gt\": \"GT\", \"GtE\": \"GE\", } containing_comparison_codes = (\"In\",", "\"IMult\": \"PyNumber_InPlaceMultiply\", \"IDiv\": \"PyNumber_InPlaceDivide\", \"IFloorDiv\": \"PyNumber_InPlaceFloorDivide\", \"ITrueDiv\": \"PyNumber_InPlaceTrueDivide\", \"IMod\": \"PyNumber_InPlaceRemainder\",", "1), \"USub\": (\"PyNumber_Negative\", 1), \"Invert\": (\"PyNumber_Invert\", 1), \"Repr\": (\"PyObject_Repr\", 1),", "have fully specialized variants already. # \"Add\" : \"PyNumber_Add\", #", "not in-lined. # \"Pow\" : \"PyNumber_Power\", # \"IPow\" : \"PyNumber_InPlacePower\",", ": \"PyNumber_Multiply\", # \"Mod\" : \"PyNumber_Remainder\", # \"Div\" : \"PyNumber_Divide\",", "be faster if they had a specialized variant too. \"LShift\":", "in writing, software # distributed under the License is distributed", "= { \"Lt\": \"LT\", \"LtE\": \"LE\", \"Eq\": \"EQ\", \"NotEq\": \"NE\",", "but also works on its own. 
# # Licensed under", "\"PyNumber_Xor\", \"IAdd\": \"PyNumber_InPlaceAdd\", \"ISub\": \"PyNumber_InPlaceSubtract\", \"IMult\": \"PyNumber_InPlaceMultiply\", \"IDiv\": \"PyNumber_InPlaceDivide\", \"IFloorDiv\":", "operations or a wrapper used. \"\"\" from nuitka.PythonVersions import python_version", "faster if they had a specialized variant too. \"LShift\": \"PyNumber_Lshift\",", "\"Mod\" : \"PyNumber_Remainder\", # \"Div\" : \"PyNumber_Divide\", # \"FloorDiv\" :", "to look up the Python C/API from operations or a", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "# \"Sub\" : \"PyNumber_Subtract\", # \"Div\" : \"PyNumber_Divide\", # \"Mult\"", "out in this section have fully specialized variants already. #", "License, Version 2.0 (the \"License\"); # you may not use", "its own. # # Licensed under the Apache License, Version", "\"\"\" Operator code tables These are mostly used to look", "# You may obtain a copy of the License at", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "\"Repr\": (\"PyObject_Repr\", 1), \"Not\": (\"UNARY_NOT\", 0), } rich_comparison_codes = {", "variants already. # \"Add\" : \"PyNumber_Add\", # \"Sub\" : \"PyNumber_Subtract\",", "# \"Mod\" : \"PyNumber_Remainder\", # \"Div\" : \"PyNumber_Divide\", # \"FloorDiv\"", "CPython code is not in-lined. # \"Pow\" : \"PyNumber_Power\", #", "the License for the specific language governing permissions and #", "used. \"\"\" from nuitka.PythonVersions import python_version binary_operator_codes = { #", "\"IPow\" : \"PyNumber_InPlacePower\", # The others are generic code and", "(\"PyNumber_Positive\", 1), \"USub\": (\"PyNumber_Negative\", 1), \"Invert\": (\"PyNumber_Invert\", 1), \"Repr\": (\"PyObject_Repr\",", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. # See the License for the", "3.5 only operator if python_version >= 350: binary_operator_codes[\"MatMult\"] = \"PyNumber_MatrixMultiply\"", "350: binary_operator_codes[\"MatMult\"] = \"PyNumber_MatrixMultiply\" binary_operator_codes[\"IMatMult\"] = \"PyNumber_InPlaceMatrixMultiply\" unary_operator_codes = {", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "is not in-lined. # \"Pow\" : \"PyNumber_Power\", # \"IPow\" :", "\"PyNumber_Remainder\", # \"Div\" : \"PyNumber_Divide\", # \"FloorDiv\" : \"PyNumber_FloorDivide\", #", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "look up the Python C/API from operations or a wrapper", "# but the CPython code is not in-lined. # \"Pow\"", "others are generic code and would be faster if they", "\"PyNumber_Multiply\", # \"Mod\" : \"PyNumber_Remainder\", # \"Div\" : \"PyNumber_Divide\", #", "and would be faster if they had a specialized variant", "binary_operator_codes = { # Those commented out in this section", "Python compiler that is compatible and # integrates with CPython,", "\"PyNumber_InPlaceMatrixMultiply\" unary_operator_codes = { \"UAdd\": (\"PyNumber_Positive\", 1), \"USub\": (\"PyNumber_Negative\", 1),", "under the License. # \"\"\" Operator code tables These are", "\"License\"); # you may not use this file except in", "\"Lt\": \"LT\", \"LtE\": \"LE\", \"Eq\": \"EQ\", \"NotEq\": \"NE\", \"Gt\": \"GT\",", "variant too. 
\"LShift\": \"PyNumber_Lshift\", \"RShift\": \"PyNumber_Rshift\", \"BitAnd\": \"PyNumber_And\", \"BitOr\": \"PyNumber_Or\",", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "\"LtE\": \"LE\", \"Eq\": \"EQ\", \"NotEq\": \"NE\", \"Gt\": \"GT\", \"GtE\": \"GE\",", "# distributed under the License is distributed on an \"AS", "# Unless required by applicable law or agreed to in", "(\"UNARY_NOT\", 0), } rich_comparison_codes = { \"Lt\": \"LT\", \"LtE\": \"LE\",", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "code is in-lined # but the CPython code is not", "\"Eq\": \"EQ\", \"NotEq\": \"NE\", \"Gt\": \"GT\", \"GtE\": \"GE\", } containing_comparison_codes", "You may obtain a copy of the License at #", "\"USub\": (\"PyNumber_Negative\", 1), \"Invert\": (\"PyNumber_Invert\", 1), \"Repr\": (\"PyObject_Repr\", 1), \"Not\":", "\"Div\" : \"PyNumber_Divide\", # \"FloorDiv\" : \"PyNumber_FloorDivide\", # \"TrueDiv\" :", "\"\"\" from nuitka.PythonVersions import python_version binary_operator_codes = { # Those", "# Part of \"Nuitka\", an optimizing Python compiler that is", "import python_version binary_operator_codes = { # Those commented out in", "the Apache License, Version 2.0 (the \"License\"); # you may", "# These have their own variants only to make sure", "already. # \"Add\" : \"PyNumber_Add\", # \"Sub\" : \"PyNumber_Subtract\", #", "The others are generic code and would be faster if" ]
[ "get_external_pipeline_or_raise(graphene_info, selector): from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import GraphenePipeline", "in selector.solid_selection: if not full_pipeline.has_solid_invocation(solid_name): raise UserFacingGraphQLError( GrapheneInvalidSubsetError( message='Solid \"{solid_name}\"", "full_pipeline = get_full_external_pipeline_or_raise(graphene_info, selector) if selector.solid_selection is None: return full_pipeline", "..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import GraphenePipeline check.inst_param(selector, \"selector\", PipelineSelector)", "from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import GraphenePipeline check.inst_param(graphene_info, \"graphene_info\",", "serializable_error_info_from_exc_info(sys.exc_info()) raise UserFacingGraphQLError( GrapheneInvalidSubsetError( message=\"{message}{cause_message}\".format( message=error_info.message, cause_message=\"\\n{}\".format(error_info.cause.message) if error_info.cause else", "<gh_stars>1-10 import sys from dagster import check from dagster.config.validate import", "check from dagster.config.validate import validate_config_from_snap from dagster.core.host_representation import ExternalPipeline, PipelineSelector,", "in \"{pipeline_name}\"'.format( solid_name=solid_name, pipeline_name=selector.pipeline_name ), pipeline=GraphenePipeline(full_pipeline), ) ) return get_subset_external_pipeline(graphene_info.context,", ") return external_pipeline def ensure_valid_config(external_pipeline, mode, run_config): from ..schema.pipelines.config import", "def fetch_repository(graphene_info, repository_selector): from ..schema.errors import GrapheneRepositoryNotFoundError from ..schema.external import", "from ..schema.errors import GraphenePipelineNotFoundError check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector)", "validated_config.success: raise UserFacingGraphQLError( GrapheneRunConfigValidationInvalid.for_validation_errors( external_pipeline, validated_config.errors ) ) return validated_config", "nodes=[ GrapheneRepository(repository=repository, repository_location=location) for location in graphene_info.context.repository_locations for repository in", "ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector) if not graphene_info.context.has_external_pipeline(selector): raise UserFacingGraphQLError(GraphenePipelineNotFoundError(selector=selector)) return", "from dagster.config.validate import validate_config_from_snap from dagster.core.host_representation import ExternalPipeline, PipelineSelector, RepositorySelector", "def get_full_external_pipeline_or_raise(graphene_info, selector): from ..schema.errors import GraphenePipelineNotFoundError check.inst_param(graphene_info, \"graphene_info\", ResolveInfo)", "PipelineSelector) full_pipeline = get_full_external_pipeline_or_raise(graphene_info, selector) if selector.solid_selection is None: return", "None: return full_pipeline for solid_name in selector.solid_selection: if not full_pipeline.has_solid_invocation(solid_name):", "RepositorySelector from dagster.core.workspace.context import BaseWorkspaceRequestContext from dagster.utils.error import serializable_error_info_from_exc_info from", "), pipeline=GraphenePipeline(context.get_full_external_pipeline(selector)), ) ) return external_pipeline def 
ensure_valid_config(external_pipeline, mode, run_config):", "\"workspace_request_context\", BaseWorkspaceRequestContext ) nodes = [ GrapheneWorkspaceLocationEntry(entry) for entry in", "for location in graphene_info.context.repository_locations for repository in location.get_repositories().values() ] )", "is None: return full_pipeline for solid_name in selector.solid_selection: if not", "from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import GraphenePipeline check.inst_param(selector, \"selector\",", "\"selector\", PipelineSelector) full_pipeline = get_full_external_pipeline_or_raise(graphene_info, selector) if selector.solid_selection is None:", "graphene_info, external_pipeline, mode, run_config, step_keys_to_execute, known_state, ): return graphene_info.context.get_external_execution_plan( external_pipeline=external_pipeline,", "selector.solid_selection: if not full_pipeline.has_solid_invocation(solid_name): raise UserFacingGraphQLError( GrapheneInvalidSubsetError( message='Solid \"{solid_name}\" does", "fetch_repository(graphene_info, repository_selector): from ..schema.errors import GrapheneRepositoryNotFoundError from ..schema.external import GrapheneRepository", "known_state=known_state, ) @capture_error def fetch_repositories(graphene_info): from ..schema.external import GrapheneRepository, GrapheneRepositoryConnection", ") return get_subset_external_pipeline(graphene_info.context, selector) def get_subset_external_pipeline(context, selector): from ..schema.pipelines.pipeline_errors import", "for solid_name in selector.solid_selection: if not full_pipeline.has_solid_invocation(solid_name): raise UserFacingGraphQLError( GrapheneInvalidSubsetError(", "graphene_info.context.has_external_pipeline(selector): raise UserFacingGraphQLError(GraphenePipelineNotFoundError(selector=selector)) return graphene_info.context.get_full_external_pipeline(selector) def get_external_pipeline_or_raise(graphene_info, selector): from ..schema.pipelines.pipeline_errors", "\"selector\", PipelineSelector) repository_location = context.get_repository_location(selector.location_name) try: external_pipeline = repository_location.get_external_pipeline(selector) except", "message=\"{message}{cause_message}\".format( message=error_info.message, cause_message=\"\\n{}\".format(error_info.cause.message) if error_info.cause else \"\", ), pipeline=GraphenePipeline(context.get_full_external_pipeline(selector)), )", "mode, run_config): from ..schema.pipelines.config import GrapheneRunConfigValidationInvalid check.inst_param(external_pipeline, \"external_pipeline\", ExternalPipeline) check.opt_str_param(mode,", "return GrapheneRepository( repository=repo_loc.get_repository(repository_selector.repository_name), repository_location=repo_loc, ) return GrapheneRepositoryNotFoundError( repository_selector.location_name, repository_selector.repository_name )", "dagster.core.host_representation import ExternalPipeline, PipelineSelector, RepositorySelector from dagster.core.workspace.context import BaseWorkspaceRequestContext from", "raise UserFacingGraphQLError(GraphenePipelineNotFoundError(selector=selector)) return graphene_info.context.get_full_external_pipeline(selector) def get_external_pipeline_or_raise(graphene_info, selector): from ..schema.pipelines.pipeline_errors import", "..schema.pipelines.config import GrapheneRunConfigValidationInvalid check.inst_param(external_pipeline, \"external_pipeline\", ExternalPipeline) check.opt_str_param(mode, \"mode\") # do", 
"repository_selector.repository_name ) @capture_error def fetch_workspace(workspace_request_context): from ..schema.external import GrapheneWorkspace, GrapheneWorkspaceLocationEntry", "check.inst_param(selector, \"selector\", PipelineSelector) if not graphene_info.context.has_external_pipeline(selector): raise UserFacingGraphQLError(GraphenePipelineNotFoundError(selector=selector)) return graphene_info.context.get_full_external_pipeline(selector)", "import GrapheneRepository check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(repository_selector, \"repository_selector\", RepositorySelector) if graphene_info.context.has_repository_location(repository_selector.location_name):", "from .utils import UserFacingGraphQLError, capture_error def get_full_external_pipeline_or_raise(graphene_info, selector): from ..schema.errors", "throws validated_config = validate_config_from_snap( config_schema_snapshot=external_pipeline.config_schema_snapshot, config_type_key=external_pipeline.root_config_key_for_mode(mode), config_value=run_config, ) if not", "), pipeline=GraphenePipeline(full_pipeline), ) ) return get_subset_external_pipeline(graphene_info.context, selector) def get_subset_external_pipeline(context, selector):", "from ..schema.errors import GrapheneRepositoryNotFoundError from ..schema.external import GrapheneRepository check.inst_param(graphene_info, \"graphene_info\",", "import GrapheneWorkspace, GrapheneWorkspaceLocationEntry check.inst_param( workspace_request_context, \"workspace_request_context\", BaseWorkspaceRequestContext ) nodes =", "BaseWorkspaceRequestContext ) nodes = [ GrapheneWorkspaceLocationEntry(entry) for entry in workspace_request_context.get_workspace_snapshot().values()", "GraphenePipelineNotFoundError check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector) if not graphene_info.context.has_external_pipeline(selector):", "GrapheneRepositoryConnection( nodes=[ GrapheneRepository(repository=repository, repository_location=location) for location in graphene_info.context.repository_locations for repository", "from dagster.core.workspace.context import BaseWorkspaceRequestContext from dagster.utils.error import serializable_error_info_from_exc_info from graphql.execution.base", "= repository_location.get_external_pipeline(selector) except Exception: error_info = serializable_error_info_from_exc_info(sys.exc_info()) raise UserFacingGraphQLError( GrapheneInvalidSubsetError(", "not full_pipeline.has_solid_invocation(solid_name): raise UserFacingGraphQLError( GrapheneInvalidSubsetError( message='Solid \"{solid_name}\" does not exist", "\"graphene_info\", ResolveInfo) check.inst_param(repository_selector, \"repository_selector\", RepositorySelector) if graphene_info.context.has_repository_location(repository_selector.location_name): repo_loc = graphene_info.context.get_repository_location(repository_selector.location_name)", "from ..schema.pipelines.config import GrapheneRunConfigValidationInvalid check.inst_param(external_pipeline, \"external_pipeline\", ExternalPipeline) check.opt_str_param(mode, \"mode\") #", "mode=mode, step_keys_to_execute=step_keys_to_execute, known_state=known_state, ) @capture_error def fetch_repositories(graphene_info): from ..schema.external import", "repository=repo_loc.get_repository(repository_selector.repository_name), repository_location=repo_loc, ) return GrapheneRepositoryNotFoundError( repository_selector.location_name, repository_selector.repository_name 
) @capture_error def", "selector) if selector.solid_selection is None: return full_pipeline for solid_name in", "raise UserFacingGraphQLError( GrapheneInvalidSubsetError( message=\"{message}{cause_message}\".format( message=error_info.message, cause_message=\"\\n{}\".format(error_info.cause.message) if error_info.cause else \"\",", "GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import GraphenePipeline check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\",", "def get_subset_external_pipeline(context, selector): from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import", "repository_selector): from ..schema.errors import GrapheneRepositoryNotFoundError from ..schema.external import GrapheneRepository check.inst_param(graphene_info,", "if selector.solid_selection is None: return full_pipeline for solid_name in selector.solid_selection:", "UserFacingGraphQLError(GraphenePipelineNotFoundError(selector=selector)) return graphene_info.context.get_full_external_pipeline(selector) def get_external_pipeline_or_raise(graphene_info, selector): from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError", ".utils import UserFacingGraphQLError, capture_error def get_full_external_pipeline_or_raise(graphene_info, selector): from ..schema.errors import", "external_pipeline def ensure_valid_config(external_pipeline, mode, run_config): from ..schema.pipelines.config import GrapheneRunConfigValidationInvalid check.inst_param(external_pipeline,", "repository in location.get_repositories().values() ] ) @capture_error def fetch_repository(graphene_info, repository_selector): from", "not graphene_info.context.has_external_pipeline(selector): raise UserFacingGraphQLError(GraphenePipelineNotFoundError(selector=selector)) return graphene_info.context.get_full_external_pipeline(selector) def get_external_pipeline_or_raise(graphene_info, selector): from", "selector.solid_selection is None: return full_pipeline for solid_name in selector.solid_selection: if", "..schema.external import GrapheneWorkspace, GrapheneWorkspaceLocationEntry check.inst_param( workspace_request_context, \"workspace_request_context\", BaseWorkspaceRequestContext ) nodes", "run_config=run_config, mode=mode, step_keys_to_execute=step_keys_to_execute, known_state=known_state, ) @capture_error def fetch_repositories(graphene_info): from ..schema.external", "ExternalPipeline, PipelineSelector, RepositorySelector from dagster.core.workspace.context import BaseWorkspaceRequestContext from dagster.utils.error import", "dagster import check from dagster.config.validate import validate_config_from_snap from dagster.core.host_representation import", "from dagster.utils.error import serializable_error_info_from_exc_info from graphql.execution.base import ResolveInfo from .utils", "def ensure_valid_config(external_pipeline, mode, run_config): from ..schema.pipelines.config import GrapheneRunConfigValidationInvalid check.inst_param(external_pipeline, \"external_pipeline\",", "message=error_info.message, cause_message=\"\\n{}\".format(error_info.cause.message) if error_info.cause else \"\", ), pipeline=GraphenePipeline(context.get_full_external_pipeline(selector)), ) )", "validated_config.errors ) ) return validated_config def get_external_execution_plan_or_raise( graphene_info, external_pipeline, mode,", "import BaseWorkspaceRequestContext from dagster.utils.error import serializable_error_info_from_exc_info from 
graphql.execution.base import ResolveInfo", "sys from dagster import check from dagster.config.validate import validate_config_from_snap from", "solid_name=solid_name, pipeline_name=selector.pipeline_name ), pipeline=GraphenePipeline(full_pipeline), ) ) return get_subset_external_pipeline(graphene_info.context, selector) def", "import GraphenePipeline check.inst_param(selector, \"selector\", PipelineSelector) repository_location = context.get_repository_location(selector.location_name) try: external_pipeline", "repository_location.get_external_pipeline(selector) except Exception: error_info = serializable_error_info_from_exc_info(sys.exc_info()) raise UserFacingGraphQLError( GrapheneInvalidSubsetError( message=\"{message}{cause_message}\".format(", "fetch_workspace(workspace_request_context): from ..schema.external import GrapheneWorkspace, GrapheneWorkspaceLocationEntry check.inst_param( workspace_request_context, \"workspace_request_context\", BaseWorkspaceRequestContext", "..schema.external import GrapheneRepository, GrapheneRepositoryConnection check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) return GrapheneRepositoryConnection( nodes=[", "in location.get_repositories().values() ] ) @capture_error def fetch_repository(graphene_info, repository_selector): from ..schema.errors", "if repo_loc.has_repository(repository_selector.repository_name): return GrapheneRepository( repository=repo_loc.get_repository(repository_selector.repository_name), repository_location=repo_loc, ) return GrapheneRepositoryNotFoundError( repository_selector.location_name,", "get_full_external_pipeline_or_raise(graphene_info, selector): from ..schema.errors import GraphenePipelineNotFoundError check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector,", "BaseWorkspaceRequestContext from dagster.utils.error import serializable_error_info_from_exc_info from graphql.execution.base import ResolveInfo from", "GrapheneRepository( repository=repo_loc.get_repository(repository_selector.repository_name), repository_location=repo_loc, ) return GrapheneRepositoryNotFoundError( repository_selector.location_name, repository_selector.repository_name ) @capture_error", "validate_config_from_snap( config_schema_snapshot=external_pipeline.config_schema_snapshot, config_type_key=external_pipeline.root_config_key_for_mode(mode), config_value=run_config, ) if not validated_config.success: raise UserFacingGraphQLError(", "@capture_error def fetch_repository(graphene_info, repository_selector): from ..schema.errors import GrapheneRepositoryNotFoundError from ..schema.external", "if graphene_info.context.has_repository_location(repository_selector.location_name): repo_loc = graphene_info.context.get_repository_location(repository_selector.location_name) if repo_loc.has_repository(repository_selector.repository_name): return GrapheneRepository( repository=repo_loc.get_repository(repository_selector.repository_name),", "= [ GrapheneWorkspaceLocationEntry(entry) for entry in workspace_request_context.get_workspace_snapshot().values() ] return GrapheneWorkspace(locationEntries=nodes)", "\"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector) full_pipeline = get_full_external_pipeline_or_raise(graphene_info, selector) if", "dagster.config.validate import validate_config_from_snap from dagster.core.host_representation import ExternalPipeline, PipelineSelector, RepositorySelector from", "not exist in \"{pipeline_name}\"'.format( solid_name=solid_name, 
pipeline_name=selector.pipeline_name ), pipeline=GraphenePipeline(full_pipeline), ) )", "check.inst_param(external_pipeline, \"external_pipeline\", ExternalPipeline) check.opt_str_param(mode, \"mode\") # do not type check", "import GraphenePipelineNotFoundError check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector) if not", "GrapheneRepositoryNotFoundError from ..schema.external import GrapheneRepository check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(repository_selector, \"repository_selector\",", "check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector) if not graphene_info.context.has_external_pipeline(selector): raise", "full_pipeline for solid_name in selector.solid_selection: if not full_pipeline.has_solid_invocation(solid_name): raise UserFacingGraphQLError(", "GraphenePipeline check.inst_param(selector, \"selector\", PipelineSelector) repository_location = context.get_repository_location(selector.location_name) try: external_pipeline =", "UserFacingGraphQLError( GrapheneRunConfigValidationInvalid.for_validation_errors( external_pipeline, validated_config.errors ) ) return validated_config def get_external_execution_plan_or_raise(", "def fetch_repositories(graphene_info): from ..schema.external import GrapheneRepository, GrapheneRepositoryConnection check.inst_param(graphene_info, \"graphene_info\", ResolveInfo)", "GrapheneRepository, GrapheneRepositoryConnection check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) return GrapheneRepositoryConnection( nodes=[ GrapheneRepository(repository=repository, repository_location=location)", "\"graphene_info\", ResolveInfo) return GrapheneRepositoryConnection( nodes=[ GrapheneRepository(repository=repository, repository_location=location) for location in", "..schema.external import GrapheneRepository check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(repository_selector, \"repository_selector\", RepositorySelector) if", "GrapheneWorkspaceLocationEntry check.inst_param( workspace_request_context, \"workspace_request_context\", BaseWorkspaceRequestContext ) nodes = [ GrapheneWorkspaceLocationEntry(entry)", "@capture_error def fetch_repositories(graphene_info): from ..schema.external import GrapheneRepository, GrapheneRepositoryConnection check.inst_param(graphene_info, \"graphene_info\",", "= get_full_external_pipeline_or_raise(graphene_info, selector) if selector.solid_selection is None: return full_pipeline for", "if error_info.cause else \"\", ), pipeline=GraphenePipeline(context.get_full_external_pipeline(selector)), ) ) return external_pipeline", "get_external_execution_plan_or_raise( graphene_info, external_pipeline, mode, run_config, step_keys_to_execute, known_state, ): return graphene_info.context.get_external_execution_plan(", "error_info = serializable_error_info_from_exc_info(sys.exc_info()) raise UserFacingGraphQLError( GrapheneInvalidSubsetError( message=\"{message}{cause_message}\".format( message=error_info.message, cause_message=\"\\n{}\".format(error_info.cause.message) if", "ResolveInfo) check.inst_param(repository_selector, \"repository_selector\", RepositorySelector) if graphene_info.context.has_repository_location(repository_selector.location_name): repo_loc = graphene_info.context.get_repository_location(repository_selector.location_name) if", "import check from dagster.config.validate import validate_config_from_snap 
from dagster.core.host_representation import ExternalPipeline,", ") return validated_config def get_external_execution_plan_or_raise( graphene_info, external_pipeline, mode, run_config, step_keys_to_execute,", "import GrapheneRepository, GrapheneRepositoryConnection check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) return GrapheneRepositoryConnection( nodes=[ GrapheneRepository(repository=repository,", "location in graphene_info.context.repository_locations for repository in location.get_repositories().values() ] ) @capture_error", "return graphene_info.context.get_full_external_pipeline(selector) def get_external_pipeline_or_raise(graphene_info, selector): from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from", "repo_loc.has_repository(repository_selector.repository_name): return GrapheneRepository( repository=repo_loc.get_repository(repository_selector.repository_name), repository_location=repo_loc, ) return GrapheneRepositoryNotFoundError( repository_selector.location_name, repository_selector.repository_name", "external_pipeline = repository_location.get_external_pipeline(selector) except Exception: error_info = serializable_error_info_from_exc_info(sys.exc_info()) raise UserFacingGraphQLError(", "import sys from dagster import check from dagster.config.validate import validate_config_from_snap", "full_pipeline.has_solid_invocation(solid_name): raise UserFacingGraphQLError( GrapheneInvalidSubsetError( message='Solid \"{solid_name}\" does not exist in", "import GrapheneRunConfigValidationInvalid check.inst_param(external_pipeline, \"external_pipeline\", ExternalPipeline) check.opt_str_param(mode, \"mode\") # do not", "PipelineSelector) if not graphene_info.context.has_external_pipeline(selector): raise UserFacingGraphQLError(GraphenePipelineNotFoundError(selector=selector)) return graphene_info.context.get_full_external_pipeline(selector) def get_external_pipeline_or_raise(graphene_info,", "selector) def get_subset_external_pipeline(context, selector): from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline", "validate_config_from_snap throws validated_config = validate_config_from_snap( config_schema_snapshot=external_pipeline.config_schema_snapshot, config_type_key=external_pipeline.root_config_key_for_mode(mode), config_value=run_config, ) if", "pipeline_name=selector.pipeline_name ), pipeline=GraphenePipeline(full_pipeline), ) ) return get_subset_external_pipeline(graphene_info.context, selector) def get_subset_external_pipeline(context,", "ExternalPipeline) check.opt_str_param(mode, \"mode\") # do not type check run_config so", "step_keys_to_execute, known_state, ): return graphene_info.context.get_external_execution_plan( external_pipeline=external_pipeline, run_config=run_config, mode=mode, step_keys_to_execute=step_keys_to_execute, known_state=known_state,", "graphene_info.context.repository_locations for repository in location.get_repositories().values() ] ) @capture_error def fetch_repository(graphene_info,", "\"repository_selector\", RepositorySelector) if graphene_info.context.has_repository_location(repository_selector.location_name): repo_loc = graphene_info.context.get_repository_location(repository_selector.location_name) if repo_loc.has_repository(repository_selector.repository_name): return", "import ResolveInfo from .utils import UserFacingGraphQLError, capture_error def get_full_external_pipeline_or_raise(graphene_info, selector):", "GrapheneRepository 
check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(repository_selector, \"repository_selector\", RepositorySelector) if graphene_info.context.has_repository_location(repository_selector.location_name): repo_loc", "\"\", ), pipeline=GraphenePipeline(context.get_full_external_pipeline(selector)), ) ) return external_pipeline def ensure_valid_config(external_pipeline, mode,", "config_value=run_config, ) if not validated_config.success: raise UserFacingGraphQLError( GrapheneRunConfigValidationInvalid.for_validation_errors( external_pipeline, validated_config.errors", "return validated_config def get_external_execution_plan_or_raise( graphene_info, external_pipeline, mode, run_config, step_keys_to_execute, known_state,", "context.get_repository_location(selector.location_name) try: external_pipeline = repository_location.get_external_pipeline(selector) except Exception: error_info = serializable_error_info_from_exc_info(sys.exc_info())", "GrapheneRunConfigValidationInvalid check.inst_param(external_pipeline, \"external_pipeline\", ExternalPipeline) check.opt_str_param(mode, \"mode\") # do not type", "known_state, ): return graphene_info.context.get_external_execution_plan( external_pipeline=external_pipeline, run_config=run_config, mode=mode, step_keys_to_execute=step_keys_to_execute, known_state=known_state, )", "from graphql.execution.base import ResolveInfo from .utils import UserFacingGraphQLError, capture_error def", "from ..schema.external import GrapheneRepository, GrapheneRepositoryConnection check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) return GrapheneRepositoryConnection(", "validate_config_from_snap from dagster.core.host_representation import ExternalPipeline, PipelineSelector, RepositorySelector from dagster.core.workspace.context import", "from dagster.core.host_representation import ExternalPipeline, PipelineSelector, RepositorySelector from dagster.core.workspace.context import BaseWorkspaceRequestContext", "location.get_repositories().values() ] ) @capture_error def fetch_repository(graphene_info, repository_selector): from ..schema.errors import", "GrapheneRunConfigValidationInvalid.for_validation_errors( external_pipeline, validated_config.errors ) ) return validated_config def get_external_execution_plan_or_raise( graphene_info,", "repository_location=location) for location in graphene_info.context.repository_locations for repository in location.get_repositories().values() ]", "exist in \"{pipeline_name}\"'.format( solid_name=solid_name, pipeline_name=selector.pipeline_name ), pipeline=GraphenePipeline(full_pipeline), ) ) return", "import ExternalPipeline, PipelineSelector, RepositorySelector from dagster.core.workspace.context import BaseWorkspaceRequestContext from dagster.utils.error", "capture_error def get_full_external_pipeline_or_raise(graphene_info, selector): from ..schema.errors import GraphenePipelineNotFoundError check.inst_param(graphene_info, \"graphene_info\",", "from ..schema.pipelines.pipeline import GraphenePipeline check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector)", "does not exist in \"{pipeline_name}\"'.format( solid_name=solid_name, pipeline_name=selector.pipeline_name ), pipeline=GraphenePipeline(full_pipeline), )", "get_subset_external_pipeline(context, selector): from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import GraphenePipeline", "\"selector\", PipelineSelector) if 
not graphene_info.context.has_external_pipeline(selector): raise UserFacingGraphQLError(GraphenePipelineNotFoundError(selector=selector)) return graphene_info.context.get_full_external_pipeline(selector) def", "message='Solid \"{solid_name}\" does not exist in \"{pipeline_name}\"'.format( solid_name=solid_name, pipeline_name=selector.pipeline_name ),", ") @capture_error def fetch_repositories(graphene_info): from ..schema.external import GrapheneRepository, GrapheneRepositoryConnection check.inst_param(graphene_info,", "external_pipeline, validated_config.errors ) ) return validated_config def get_external_execution_plan_or_raise( graphene_info, external_pipeline,", "run_config, step_keys_to_execute, known_state, ): return graphene_info.context.get_external_execution_plan( external_pipeline=external_pipeline, run_config=run_config, mode=mode, step_keys_to_execute=step_keys_to_execute,", "= validate_config_from_snap( config_schema_snapshot=external_pipeline.config_schema_snapshot, config_type_key=external_pipeline.root_config_key_for_mode(mode), config_value=run_config, ) if not validated_config.success: raise", "def fetch_workspace(workspace_request_context): from ..schema.external import GrapheneWorkspace, GrapheneWorkspaceLocationEntry check.inst_param( workspace_request_context, \"workspace_request_context\",", "check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) return GrapheneRepositoryConnection( nodes=[ GrapheneRepository(repository=repository, repository_location=location) for location", "GrapheneRepositoryConnection check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) return GrapheneRepositoryConnection( nodes=[ GrapheneRepository(repository=repository, repository_location=location) for", "return get_subset_external_pipeline(graphene_info.context, selector) def get_subset_external_pipeline(context, selector): from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError", "= serializable_error_info_from_exc_info(sys.exc_info()) raise UserFacingGraphQLError( GrapheneInvalidSubsetError( message=\"{message}{cause_message}\".format( message=error_info.message, cause_message=\"\\n{}\".format(error_info.cause.message) if error_info.cause", "UserFacingGraphQLError, capture_error def get_full_external_pipeline_or_raise(graphene_info, selector): from ..schema.errors import GraphenePipelineNotFoundError check.inst_param(graphene_info,", "raise UserFacingGraphQLError( GrapheneRunConfigValidationInvalid.for_validation_errors( external_pipeline, validated_config.errors ) ) return validated_config def", ") @capture_error def fetch_repository(graphene_info, repository_selector): from ..schema.errors import GrapheneRepositoryNotFoundError from", "repo_loc = graphene_info.context.get_repository_location(repository_selector.location_name) if repo_loc.has_repository(repository_selector.repository_name): return GrapheneRepository( repository=repo_loc.get_repository(repository_selector.repository_name), repository_location=repo_loc, )", "repository_location = context.get_repository_location(selector.location_name) try: external_pipeline = repository_location.get_external_pipeline(selector) except Exception: error_info", "@capture_error def fetch_workspace(workspace_request_context): from ..schema.external import GrapheneWorkspace, GrapheneWorkspaceLocationEntry check.inst_param( workspace_request_context,", "step_keys_to_execute=step_keys_to_execute, known_state=known_state, ) @capture_error def fetch_repositories(graphene_info): from ..schema.external import 
GrapheneRepository,", "..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import GraphenePipeline check.inst_param(graphene_info, \"graphene_info\", ResolveInfo)", ") nodes = [ GrapheneWorkspaceLocationEntry(entry) for entry in workspace_request_context.get_workspace_snapshot().values() ]", "PipelineSelector) repository_location = context.get_repository_location(selector.location_name) try: external_pipeline = repository_location.get_external_pipeline(selector) except Exception:", "mode, run_config, step_keys_to_execute, known_state, ): return graphene_info.context.get_external_execution_plan( external_pipeline=external_pipeline, run_config=run_config, mode=mode,", "GrapheneInvalidSubsetError( message=\"{message}{cause_message}\".format( message=error_info.message, cause_message=\"\\n{}\".format(error_info.cause.message) if error_info.cause else \"\", ), pipeline=GraphenePipeline(context.get_full_external_pipeline(selector)),", "\"{pipeline_name}\"'.format( solid_name=solid_name, pipeline_name=selector.pipeline_name ), pipeline=GraphenePipeline(full_pipeline), ) ) return get_subset_external_pipeline(graphene_info.context, selector)", "# do not type check run_config so that validate_config_from_snap throws", ") if not validated_config.success: raise UserFacingGraphQLError( GrapheneRunConfigValidationInvalid.for_validation_errors( external_pipeline, validated_config.errors )", "not validated_config.success: raise UserFacingGraphQLError( GrapheneRunConfigValidationInvalid.for_validation_errors( external_pipeline, validated_config.errors ) ) return", "RepositorySelector) if graphene_info.context.has_repository_location(repository_selector.location_name): repo_loc = graphene_info.context.get_repository_location(repository_selector.location_name) if repo_loc.has_repository(repository_selector.repository_name): return GrapheneRepository(", "serializable_error_info_from_exc_info from graphql.execution.base import ResolveInfo from .utils import UserFacingGraphQLError, capture_error", "graphql.execution.base import ResolveInfo from .utils import UserFacingGraphQLError, capture_error def get_full_external_pipeline_or_raise(graphene_info,", "import GraphenePipeline check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector) full_pipeline =", "except Exception: error_info = serializable_error_info_from_exc_info(sys.exc_info()) raise UserFacingGraphQLError( GrapheneInvalidSubsetError( message=\"{message}{cause_message}\".format( message=error_info.message,", "if not graphene_info.context.has_external_pipeline(selector): raise UserFacingGraphQLError(GraphenePipelineNotFoundError(selector=selector)) return graphene_info.context.get_full_external_pipeline(selector) def get_external_pipeline_or_raise(graphene_info, selector):", "def get_external_execution_plan_or_raise( graphene_info, external_pipeline, mode, run_config, step_keys_to_execute, known_state, ): return", "try: external_pipeline = repository_location.get_external_pipeline(selector) except Exception: error_info = serializable_error_info_from_exc_info(sys.exc_info()) raise", "else \"\", ), pipeline=GraphenePipeline(context.get_full_external_pipeline(selector)), ) ) return external_pipeline def ensure_valid_config(external_pipeline,", "..schema.pipelines.pipeline import GraphenePipeline check.inst_param(selector, \"selector\", PipelineSelector) repository_location = context.get_repository_location(selector.location_name) try:", 
"graphene_info.context.get_full_external_pipeline(selector) def get_external_pipeline_or_raise(graphene_info, selector): from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline", "..schema.pipelines.pipeline import GraphenePipeline check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector) full_pipeline", "return external_pipeline def ensure_valid_config(external_pipeline, mode, run_config): from ..schema.pipelines.config import GrapheneRunConfigValidationInvalid", "type check run_config so that validate_config_from_snap throws validated_config = validate_config_from_snap(", "validated_config def get_external_execution_plan_or_raise( graphene_info, external_pipeline, mode, run_config, step_keys_to_execute, known_state, ):", "external_pipeline=external_pipeline, run_config=run_config, mode=mode, step_keys_to_execute=step_keys_to_execute, known_state=known_state, ) @capture_error def fetch_repositories(graphene_info): from", "..schema.errors import GrapheneRepositoryNotFoundError from ..schema.external import GrapheneRepository check.inst_param(graphene_info, \"graphene_info\", ResolveInfo)", "workspace_request_context, \"workspace_request_context\", BaseWorkspaceRequestContext ) nodes = [ GrapheneWorkspaceLocationEntry(entry) for entry", "return GrapheneRepositoryConnection( nodes=[ GrapheneRepository(repository=repository, repository_location=location) for location in graphene_info.context.repository_locations for", "external_pipeline, mode, run_config, step_keys_to_execute, known_state, ): return graphene_info.context.get_external_execution_plan( external_pipeline=external_pipeline, run_config=run_config,", "GrapheneRepositoryNotFoundError( repository_selector.location_name, repository_selector.repository_name ) @capture_error def fetch_workspace(workspace_request_context): from ..schema.external import", "check.inst_param( workspace_request_context, \"workspace_request_context\", BaseWorkspaceRequestContext ) nodes = [ GrapheneWorkspaceLocationEntry(entry) for", "return GrapheneRepositoryNotFoundError( repository_selector.location_name, repository_selector.repository_name ) @capture_error def fetch_workspace(workspace_request_context): from ..schema.external", "get_subset_external_pipeline(graphene_info.context, selector) def get_subset_external_pipeline(context, selector): from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from", "do not type check run_config so that validate_config_from_snap throws validated_config", "error_info.cause else \"\", ), pipeline=GraphenePipeline(context.get_full_external_pipeline(selector)), ) ) return external_pipeline def", "if not full_pipeline.has_solid_invocation(solid_name): raise UserFacingGraphQLError( GrapheneInvalidSubsetError( message='Solid \"{solid_name}\" does not", "check run_config so that validate_config_from_snap throws validated_config = validate_config_from_snap( config_schema_snapshot=external_pipeline.config_schema_snapshot,", "import GrapheneRepositoryNotFoundError from ..schema.external import GrapheneRepository check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(repository_selector,", "nodes = [ GrapheneWorkspaceLocationEntry(entry) for entry in workspace_request_context.get_workspace_snapshot().values() ] return", "Exception: error_info = serializable_error_info_from_exc_info(sys.exc_info()) raise UserFacingGraphQLError( GrapheneInvalidSubsetError( 
message=\"{message}{cause_message}\".format( message=error_info.message, cause_message=\"\\n{}\".format(error_info.cause.message)", "pipeline=GraphenePipeline(full_pipeline), ) ) return get_subset_external_pipeline(graphene_info.context, selector) def get_subset_external_pipeline(context, selector): from", "import serializable_error_info_from_exc_info from graphql.execution.base import ResolveInfo from .utils import UserFacingGraphQLError,", "from ..schema.external import GrapheneWorkspace, GrapheneWorkspaceLocationEntry check.inst_param( workspace_request_context, \"workspace_request_context\", BaseWorkspaceRequestContext )", "GrapheneInvalidSubsetError( message='Solid \"{solid_name}\" does not exist in \"{pipeline_name}\"'.format( solid_name=solid_name, pipeline_name=selector.pipeline_name", "\"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector) if not graphene_info.context.has_external_pipeline(selector): raise UserFacingGraphQLError(GraphenePipelineNotFoundError(selector=selector))", "so that validate_config_from_snap throws validated_config = validate_config_from_snap( config_schema_snapshot=external_pipeline.config_schema_snapshot, config_type_key=external_pipeline.root_config_key_for_mode(mode), config_value=run_config,", "fetch_repositories(graphene_info): from ..schema.external import GrapheneRepository, GrapheneRepositoryConnection check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) return", "from dagster import check from dagster.config.validate import validate_config_from_snap from dagster.core.host_representation", "selector): from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import GraphenePipeline check.inst_param(graphene_info,", "repository_location=repo_loc, ) return GrapheneRepositoryNotFoundError( repository_selector.location_name, repository_selector.repository_name ) @capture_error def fetch_workspace(workspace_request_context):", "GraphenePipeline check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector) full_pipeline = get_full_external_pipeline_or_raise(graphene_info,", ") ) return validated_config def get_external_execution_plan_or_raise( graphene_info, external_pipeline, mode, run_config,", "check.opt_str_param(mode, \"mode\") # do not type check run_config so that", "] ) @capture_error def fetch_repository(graphene_info, repository_selector): from ..schema.errors import GrapheneRepositoryNotFoundError", "check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(repository_selector, \"repository_selector\", RepositorySelector) if graphene_info.context.has_repository_location(repository_selector.location_name): repo_loc =", "ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector) full_pipeline = get_full_external_pipeline_or_raise(graphene_info, selector) if selector.solid_selection", ") ) return external_pipeline def ensure_valid_config(external_pipeline, mode, run_config): from ..schema.pipelines.config", "not type check run_config so that validate_config_from_snap throws validated_config =", "in graphene_info.context.repository_locations for repository in location.get_repositories().values() ] ) @capture_error def", "import UserFacingGraphQLError, capture_error def get_full_external_pipeline_or_raise(graphene_info, selector): from ..schema.errors import GraphenePipelineNotFoundError", "check.inst_param(selector, \"selector\", PipelineSelector) 
full_pipeline = get_full_external_pipeline_or_raise(graphene_info, selector) if selector.solid_selection is", "return full_pipeline for solid_name in selector.solid_selection: if not full_pipeline.has_solid_invocation(solid_name): raise", "import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import GraphenePipeline check.inst_param(selector, \"selector\", PipelineSelector) repository_location", "import validate_config_from_snap from dagster.core.host_representation import ExternalPipeline, PipelineSelector, RepositorySelector from dagster.core.workspace.context", "check.inst_param(repository_selector, \"repository_selector\", RepositorySelector) if graphene_info.context.has_repository_location(repository_selector.location_name): repo_loc = graphene_info.context.get_repository_location(repository_selector.location_name) if repo_loc.has_repository(repository_selector.repository_name):", "\"mode\") # do not type check run_config so that validate_config_from_snap", "GrapheneWorkspace, GrapheneWorkspaceLocationEntry check.inst_param( workspace_request_context, \"workspace_request_context\", BaseWorkspaceRequestContext ) nodes = [", "repository_selector.location_name, repository_selector.repository_name ) @capture_error def fetch_workspace(workspace_request_context): from ..schema.external import GrapheneWorkspace,", "import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import GraphenePipeline check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector,", "= graphene_info.context.get_repository_location(repository_selector.location_name) if repo_loc.has_repository(repository_selector.repository_name): return GrapheneRepository( repository=repo_loc.get_repository(repository_selector.repository_name), repository_location=repo_loc, ) return", "that validate_config_from_snap throws validated_config = validate_config_from_snap( config_schema_snapshot=external_pipeline.config_schema_snapshot, config_type_key=external_pipeline.root_config_key_for_mode(mode), config_value=run_config, )", "from ..schema.external import GrapheneRepository check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(repository_selector, \"repository_selector\", RepositorySelector)", "graphene_info.context.has_repository_location(repository_selector.location_name): repo_loc = graphene_info.context.get_repository_location(repository_selector.location_name) if repo_loc.has_repository(repository_selector.repository_name): return GrapheneRepository( repository=repo_loc.get_repository(repository_selector.repository_name), repository_location=repo_loc,", "..schema.errors import GraphenePipelineNotFoundError check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector) if", "UserFacingGraphQLError( GrapheneInvalidSubsetError( message='Solid \"{solid_name}\" does not exist in \"{pipeline_name}\"'.format( solid_name=solid_name,", "if not validated_config.success: raise UserFacingGraphQLError( GrapheneRunConfigValidationInvalid.for_validation_errors( external_pipeline, validated_config.errors ) )", ") ) return get_subset_external_pipeline(graphene_info.context, selector) def get_subset_external_pipeline(context, selector): from ..schema.pipelines.pipeline_errors", "check.inst_param(selector, \"selector\", PipelineSelector) repository_location = context.get_repository_location(selector.location_name) try: external_pipeline = repository_location.get_external_pipeline(selector)", 
"graphene_info.context.get_repository_location(repository_selector.location_name) if repo_loc.has_repository(repository_selector.repository_name): return GrapheneRepository( repository=repo_loc.get_repository(repository_selector.repository_name), repository_location=repo_loc, ) return GrapheneRepositoryNotFoundError(", "config_type_key=external_pipeline.root_config_key_for_mode(mode), config_value=run_config, ) if not validated_config.success: raise UserFacingGraphQLError( GrapheneRunConfigValidationInvalid.for_validation_errors( external_pipeline,", "ResolveInfo from .utils import UserFacingGraphQLError, capture_error def get_full_external_pipeline_or_raise(graphene_info, selector): from", "UserFacingGraphQLError( GrapheneInvalidSubsetError( message=\"{message}{cause_message}\".format( message=error_info.message, cause_message=\"\\n{}\".format(error_info.cause.message) if error_info.cause else \"\", ),", "solid_name in selector.solid_selection: if not full_pipeline.has_solid_invocation(solid_name): raise UserFacingGraphQLError( GrapheneInvalidSubsetError( message='Solid", "def get_external_pipeline_or_raise(graphene_info, selector): from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import", "PipelineSelector, RepositorySelector from dagster.core.workspace.context import BaseWorkspaceRequestContext from dagster.utils.error import serializable_error_info_from_exc_info", "GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import GraphenePipeline check.inst_param(selector, \"selector\", PipelineSelector) repository_location =", "return graphene_info.context.get_external_execution_plan( external_pipeline=external_pipeline, run_config=run_config, mode=mode, step_keys_to_execute=step_keys_to_execute, known_state=known_state, ) @capture_error def", "graphene_info.context.get_external_execution_plan( external_pipeline=external_pipeline, run_config=run_config, mode=mode, step_keys_to_execute=step_keys_to_execute, known_state=known_state, ) @capture_error def fetch_repositories(graphene_info):", "GrapheneRepository(repository=repository, repository_location=location) for location in graphene_info.context.repository_locations for repository in location.get_repositories().values()", ") @capture_error def fetch_workspace(workspace_request_context): from ..schema.external import GrapheneWorkspace, GrapheneWorkspaceLocationEntry check.inst_param(", "check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\", PipelineSelector) full_pipeline = get_full_external_pipeline_or_raise(graphene_info, selector)", "raise UserFacingGraphQLError( GrapheneInvalidSubsetError( message='Solid \"{solid_name}\" does not exist in \"{pipeline_name}\"'.format(", "cause_message=\"\\n{}\".format(error_info.cause.message) if error_info.cause else \"\", ), pipeline=GraphenePipeline(context.get_full_external_pipeline(selector)), ) ) return", "from ..schema.pipelines.pipeline import GraphenePipeline check.inst_param(selector, \"selector\", PipelineSelector) repository_location = context.get_repository_location(selector.location_name)", "): return graphene_info.context.get_external_execution_plan( external_pipeline=external_pipeline, run_config=run_config, mode=mode, step_keys_to_execute=step_keys_to_execute, known_state=known_state, ) @capture_error", "\"external_pipeline\", ExternalPipeline) check.opt_str_param(mode, \"mode\") # do not type check run_config", "ResolveInfo) return GrapheneRepositoryConnection( nodes=[ 
GrapheneRepository(repository=repository, repository_location=location) for location in graphene_info.context.repository_locations", "dagster.core.workspace.context import BaseWorkspaceRequestContext from dagster.utils.error import serializable_error_info_from_exc_info from graphql.execution.base import", "ensure_valid_config(external_pipeline, mode, run_config): from ..schema.pipelines.config import GrapheneRunConfigValidationInvalid check.inst_param(external_pipeline, \"external_pipeline\", ExternalPipeline)", "dagster.utils.error import serializable_error_info_from_exc_info from graphql.execution.base import ResolveInfo from .utils import", "pipeline=GraphenePipeline(context.get_full_external_pipeline(selector)), ) ) return external_pipeline def ensure_valid_config(external_pipeline, mode, run_config): from", ") return GrapheneRepositoryNotFoundError( repository_selector.location_name, repository_selector.repository_name ) @capture_error def fetch_workspace(workspace_request_context): from", "for repository in location.get_repositories().values() ] ) @capture_error def fetch_repository(graphene_info, repository_selector):", "run_config so that validate_config_from_snap throws validated_config = validate_config_from_snap( config_schema_snapshot=external_pipeline.config_schema_snapshot, config_type_key=external_pipeline.root_config_key_for_mode(mode),", "\"{solid_name}\" does not exist in \"{pipeline_name}\"'.format( solid_name=solid_name, pipeline_name=selector.pipeline_name ), pipeline=GraphenePipeline(full_pipeline),", "validated_config = validate_config_from_snap( config_schema_snapshot=external_pipeline.config_schema_snapshot, config_type_key=external_pipeline.root_config_key_for_mode(mode), config_value=run_config, ) if not validated_config.success:", "selector): from ..schema.errors import GraphenePipelineNotFoundError check.inst_param(graphene_info, \"graphene_info\", ResolveInfo) check.inst_param(selector, \"selector\",", "run_config): from ..schema.pipelines.config import GrapheneRunConfigValidationInvalid check.inst_param(external_pipeline, \"external_pipeline\", ExternalPipeline) check.opt_str_param(mode, \"mode\")", "config_schema_snapshot=external_pipeline.config_schema_snapshot, config_type_key=external_pipeline.root_config_key_for_mode(mode), config_value=run_config, ) if not validated_config.success: raise UserFacingGraphQLError( GrapheneRunConfigValidationInvalid.for_validation_errors(", "selector): from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError from ..schema.pipelines.pipeline import GraphenePipeline check.inst_param(selector,", "get_full_external_pipeline_or_raise(graphene_info, selector) if selector.solid_selection is None: return full_pipeline for solid_name", "= context.get_repository_location(selector.location_name) try: external_pipeline = repository_location.get_external_pipeline(selector) except Exception: error_info =" ]
[ "== 'def foo():\\n return 5\\n' def test_use_refs(self): w = BasicPyFileWriter()", "w = BasicPyFileWriter() w.add_line('import abc') w.add_line('import os').space() s = w.get_file_as_string()", "\"C\"') s = w.get_file_as_string() lns = s.split('\\n') assert 'a' in", "w.has_module_name('__init__') assert w.pkg_path == os.path.join(d, 'foo') w.write() assert os.path.exists(d) assert", "TemporaryDirectory() as d: w = PyPackageWriter(pkg_name='foo', parent_dir=d) assert '__init__' in", "TestPyPackageWriter: def test_includes_init(self): with TemporaryDirectory() as d: w = PyPackageWriter(pkg_name='foo',", "= \"C\"') s = w.get_file_as_string() lns = s.split('\\n') assert 'a'", "tempfile import TemporaryDirectory from quickbase_client.utils.pywriting_utils import BasicPyFileWriter from quickbase_client.utils.pywriting_utils import", "assert 'b' in lns[1] assert 'c' in lns[2] assert 'd'", "lns[2] assert 'd' in lns[3] class TestPyPackageWriter: def test_includes_init(self): with", "class TestBasicFileWriter: def test_outputs_lines(self): w = BasicPyFileWriter() w.add_line('import abc') w.add_line('import", "= w.get_file_as_string() assert s == 'import abc\\nimport os\\n' def test_indent_dedent(self):", "w.modules assert w.has_module_name('__init__') assert w.pkg_path == os.path.join(d, 'foo') w.write() assert", "PyPackageWriter class TestBasicFileWriter: def test_outputs_lines(self): w = BasicPyFileWriter() w.add_line('import abc')", "= w.get_file_as_string() lns = s.split('\\n') assert 'a' in lns[0] assert", "os.path.join(d, 'foo') w.write() assert os.path.exists(d) assert os.path.exists(os.path.join(d, 'foo')) assert os.path.exists(os.path.join(d,", "import PyPackageWriter class TestBasicFileWriter: def test_outputs_lines(self): w = BasicPyFileWriter() w.add_line('import", "s.split('\\n') assert 'a' in lns[0] assert 'b' in lns[1] assert", "in lns[3] class TestPyPackageWriter: def test_includes_init(self): with TemporaryDirectory() as d:", "'def foo():\\n return 5\\n' def test_use_refs(self): w = BasicPyFileWriter() w.add_line('a", "class TestPyPackageWriter: def test_includes_init(self): with TemporaryDirectory() as d: w =", "w.add_line('def foo():').indent().add_line('return 5').dedent().space() s = w.get_file_as_string() assert s == 'def", "== 'import abc\\nimport os\\n' def test_indent_dedent(self): w = BasicPyFileWriter() w.add_line('def", "os\\n' def test_indent_dedent(self): w = BasicPyFileWriter() w.add_line('def foo():').indent().add_line('return 5').dedent().space() s", "'import abc\\nimport os\\n' def test_indent_dedent(self): w = BasicPyFileWriter() w.add_line('def foo():').indent().add_line('return", "BasicPyFileWriter() w.add_line('import abc') w.add_line('import os').space() s = w.get_file_as_string() assert s", "= PyPackageWriter(pkg_name='foo', parent_dir=d) assert '__init__' in w.modules assert w.has_module_name('__init__') assert", "'c' in lns[2] assert 'd' in lns[3] class TestPyPackageWriter: def", "d: w = PyPackageWriter(pkg_name='foo', parent_dir=d) assert '__init__' in w.modules assert", "import os from tempfile import TemporaryDirectory from quickbase_client.utils.pywriting_utils import BasicPyFileWriter", "assert '__init__' in w.modules assert w.has_module_name('__init__') assert w.pkg_path == os.path.join(d,", "w = PyPackageWriter(pkg_name='foo', parent_dir=d) assert '__init__' in w.modules assert w.has_module_name('__init__')", "TestBasicFileWriter: def test_outputs_lines(self): w = BasicPyFileWriter() w.add_line('import abc') w.add_line('import os').space()", "'b' in 
lns[1] assert 'c' in lns[2] assert 'd' in", "quickbase_client.utils.pywriting_utils import BasicPyFileWriter from quickbase_client.utils.pywriting_utils import PyPackageWriter class TestBasicFileWriter: def", "5').dedent().space() s = w.get_file_as_string() assert s == 'def foo():\\n return", "with TemporaryDirectory() as d: w = PyPackageWriter(pkg_name='foo', parent_dir=d) assert '__init__'", "w.pkg_path == os.path.join(d, 'foo') w.write() assert os.path.exists(d) assert os.path.exists(os.path.join(d, 'foo'))", "w.make_ref() w.add_line('d = \"D\"') ref.add_line('b = \"B\"').add_line('c = \"C\"') s", "assert s == 'def foo():\\n return 5\\n' def test_use_refs(self): w", "assert 'c' in lns[2] assert 'd' in lns[3] class TestPyPackageWriter:", "BasicPyFileWriter from quickbase_client.utils.pywriting_utils import PyPackageWriter class TestBasicFileWriter: def test_outputs_lines(self): w", "w = BasicPyFileWriter() w.add_line('def foo():').indent().add_line('return 5').dedent().space() s = w.get_file_as_string() assert", "in w.modules assert w.has_module_name('__init__') assert w.pkg_path == os.path.join(d, 'foo') w.write()", "assert w.has_module_name('__init__') assert w.pkg_path == os.path.join(d, 'foo') w.write() assert os.path.exists(d)", "\"B\"').add_line('c = \"C\"') s = w.get_file_as_string() lns = s.split('\\n') assert", "'__init__' in w.modules assert w.has_module_name('__init__') assert w.pkg_path == os.path.join(d, 'foo')", "lns[0] assert 'b' in lns[1] assert 'c' in lns[2] assert", "test_use_refs(self): w = BasicPyFileWriter() w.add_line('a = \"A\"') ref = w.make_ref()", "w.add_line('a = \"A\"') ref = w.make_ref() w.add_line('d = \"D\"') ref.add_line('b", "'foo') w.write() assert os.path.exists(d) assert os.path.exists(os.path.join(d, 'foo')) assert os.path.exists(os.path.join(d, 'foo',", "PyPackageWriter(pkg_name='foo', parent_dir=d) assert '__init__' in w.modules assert w.has_module_name('__init__') assert w.pkg_path", "s == 'import abc\\nimport os\\n' def test_indent_dedent(self): w = BasicPyFileWriter()", "return 5\\n' def test_use_refs(self): w = BasicPyFileWriter() w.add_line('a = \"A\"')", "= BasicPyFileWriter() w.add_line('a = \"A\"') ref = w.make_ref() w.add_line('d =", "def test_includes_init(self): with TemporaryDirectory() as d: w = PyPackageWriter(pkg_name='foo', parent_dir=d)", "test_indent_dedent(self): w = BasicPyFileWriter() w.add_line('def foo():').indent().add_line('return 5').dedent().space() s = w.get_file_as_string()", "= w.make_ref() w.add_line('d = \"D\"') ref.add_line('b = \"B\"').add_line('c = \"C\"')", "= \"D\"') ref.add_line('b = \"B\"').add_line('c = \"C\"') s = w.get_file_as_string()", "w.add_line('import os').space() s = w.get_file_as_string() assert s == 'import abc\\nimport", "from quickbase_client.utils.pywriting_utils import BasicPyFileWriter from quickbase_client.utils.pywriting_utils import PyPackageWriter class TestBasicFileWriter:", "parent_dir=d) assert '__init__' in w.modules assert w.has_module_name('__init__') assert w.pkg_path ==", "ref.add_line('b = \"B\"').add_line('c = \"C\"') s = w.get_file_as_string() lns =", "assert 'a' in lns[0] assert 'b' in lns[1] assert 'c'", "def test_outputs_lines(self): w = BasicPyFileWriter() w.add_line('import abc') w.add_line('import os').space() s", "test_outputs_lines(self): w = BasicPyFileWriter() w.add_line('import abc') w.add_line('import os').space() s =", "s = w.get_file_as_string() lns = s.split('\\n') assert 'a' in lns[0]", "= BasicPyFileWriter() w.add_line('import abc') w.add_line('import 
os').space() s = w.get_file_as_string() assert", "= s.split('\\n') assert 'a' in lns[0] assert 'b' in lns[1]", "test_includes_init(self): with TemporaryDirectory() as d: w = PyPackageWriter(pkg_name='foo', parent_dir=d) assert", "in lns[0] assert 'b' in lns[1] assert 'c' in lns[2]", "import BasicPyFileWriter from quickbase_client.utils.pywriting_utils import PyPackageWriter class TestBasicFileWriter: def test_outputs_lines(self):", "= w.get_file_as_string() assert s == 'def foo():\\n return 5\\n' def", "in lns[2] assert 'd' in lns[3] class TestPyPackageWriter: def test_includes_init(self):", "in lns[1] assert 'c' in lns[2] assert 'd' in lns[3]", "'d' in lns[3] class TestPyPackageWriter: def test_includes_init(self): with TemporaryDirectory() as", "lns[1] assert 'c' in lns[2] assert 'd' in lns[3] class", "TemporaryDirectory from quickbase_client.utils.pywriting_utils import BasicPyFileWriter from quickbase_client.utils.pywriting_utils import PyPackageWriter class", "s == 'def foo():\\n return 5\\n' def test_use_refs(self): w =", "w = BasicPyFileWriter() w.add_line('a = \"A\"') ref = w.make_ref() w.add_line('d", "as d: w = PyPackageWriter(pkg_name='foo', parent_dir=d) assert '__init__' in w.modules", "'a' in lns[0] assert 'b' in lns[1] assert 'c' in", "os from tempfile import TemporaryDirectory from quickbase_client.utils.pywriting_utils import BasicPyFileWriter from", "s = w.get_file_as_string() assert s == 'import abc\\nimport os\\n' def", "\"D\"') ref.add_line('b = \"B\"').add_line('c = \"C\"') s = w.get_file_as_string() lns", "import TemporaryDirectory from quickbase_client.utils.pywriting_utils import BasicPyFileWriter from quickbase_client.utils.pywriting_utils import PyPackageWriter", "== os.path.join(d, 'foo') w.write() assert os.path.exists(d) assert os.path.exists(os.path.join(d, 'foo')) assert", "w.add_line('d = \"D\"') ref.add_line('b = \"B\"').add_line('c = \"C\"') s =", "def test_use_refs(self): w = BasicPyFileWriter() w.add_line('a = \"A\"') ref =", "\"A\"') ref = w.make_ref() w.add_line('d = \"D\"') ref.add_line('b = \"B\"').add_line('c", "assert s == 'import abc\\nimport os\\n' def test_indent_dedent(self): w =", "w.write() assert os.path.exists(d) assert os.path.exists(os.path.join(d, 'foo')) assert os.path.exists(os.path.join(d, 'foo', '__init__.py'))", "w.get_file_as_string() assert s == 'def foo():\\n return 5\\n' def test_use_refs(self):", "ref = w.make_ref() w.add_line('d = \"D\"') ref.add_line('b = \"B\"').add_line('c =", "abc\\nimport os\\n' def test_indent_dedent(self): w = BasicPyFileWriter() w.add_line('def foo():').indent().add_line('return 5').dedent().space()", "= \"B\"').add_line('c = \"C\"') s = w.get_file_as_string() lns = s.split('\\n')", "assert w.pkg_path == os.path.join(d, 'foo') w.write() assert os.path.exists(d) assert os.path.exists(os.path.join(d,", "from quickbase_client.utils.pywriting_utils import PyPackageWriter class TestBasicFileWriter: def test_outputs_lines(self): w =", "abc') w.add_line('import os').space() s = w.get_file_as_string() assert s == 'import", "assert 'd' in lns[3] class TestPyPackageWriter: def test_includes_init(self): with TemporaryDirectory()", "5\\n' def test_use_refs(self): w = BasicPyFileWriter() w.add_line('a = \"A\"') ref", "def test_indent_dedent(self): w = BasicPyFileWriter() w.add_line('def foo():').indent().add_line('return 5').dedent().space() s =", "foo():').indent().add_line('return 5').dedent().space() s = w.get_file_as_string() assert s == 'def foo():\\n", "BasicPyFileWriter() w.add_line('a = \"A\"') ref = 
w.make_ref() w.add_line('d = \"D\"')", "BasicPyFileWriter() w.add_line('def foo():').indent().add_line('return 5').dedent().space() s = w.get_file_as_string() assert s ==", "os').space() s = w.get_file_as_string() assert s == 'import abc\\nimport os\\n'", "lns = s.split('\\n') assert 'a' in lns[0] assert 'b' in", "foo():\\n return 5\\n' def test_use_refs(self): w = BasicPyFileWriter() w.add_line('a =", "quickbase_client.utils.pywriting_utils import PyPackageWriter class TestBasicFileWriter: def test_outputs_lines(self): w = BasicPyFileWriter()", "from tempfile import TemporaryDirectory from quickbase_client.utils.pywriting_utils import BasicPyFileWriter from quickbase_client.utils.pywriting_utils", "w.get_file_as_string() lns = s.split('\\n') assert 'a' in lns[0] assert 'b'", "s = w.get_file_as_string() assert s == 'def foo():\\n return 5\\n'", "w.add_line('import abc') w.add_line('import os').space() s = w.get_file_as_string() assert s ==", "= BasicPyFileWriter() w.add_line('def foo():').indent().add_line('return 5').dedent().space() s = w.get_file_as_string() assert s", "w.get_file_as_string() assert s == 'import abc\\nimport os\\n' def test_indent_dedent(self): w", "lns[3] class TestPyPackageWriter: def test_includes_init(self): with TemporaryDirectory() as d: w", "= \"A\"') ref = w.make_ref() w.add_line('d = \"D\"') ref.add_line('b =" ]
[ "an identity shortcut in a bottleneck building block of a", "+ str(stage) + block + '_out')(x) return output_tensor def backbone_resnet(input_image,", "tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D( num_filters_2, (3,3), padding='same', name=conv_prefix + '2b')(x)", "Builds a projection shortcut in a bottleneck block of a", "stage=4, block='a', train_bn=train_bn) num_blocks = {'resnet50':5, 'resnet101':22}[architecture] for i in", "------- output_tensor : tf tensor, [batch_size, height, width, channels] The", "stage, block, train_bn=False ): \"\"\" Builds an identity shortcut in", "Whether create stage5 of network. The default is True. train_bn", "Parameters ---------- input_image : tf tensor, [batch_size, height, width, channels]", "channels] An input tensor. architecture : string The ResNet architecture", "= identity_block( x, [512,512,2048], stage=5, block='c', train_bn=train_bn) else: C5 =", "of network. The default is True. train_bn : boolean, optional", "string A lowercase letter, used for generating layer names. train_bn", "C3 = x = identity_block( x, [128,128,512], stage=3, block='d', train_bn=train_bn)", "bn_prefix = 'bn' + str(stage) + block + '_branch' x", "Networks (ResNet) \"\"\" # adapted from # https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py import tensorflow", "): \"\"\" Builds a projection shortcut in a bottleneck block", "def conv_block( input_tensor, filters, stage, block, strides=(2, 2), train_bn=False ):", "list Feature maps at each stage. \"\"\" assert architecture in", "['resnet50', 'resnet101'], \\ 'Only support ResNet50\\101' # stage 1 x", "= tf.keras.layers.BatchNormalization(name='bn_conv1')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) C1 = x =", "conv layers at the main path. stage : integer A", "identity_block( x, [256,256,1024], stage=4, block=chr(98+i), train_bn=train_bn) C4 = x #", "# stage 3 x = conv_block(x, [128,128,512], stage=3, block='a', train_bn=train_bn)", "current batch. The default is False, i.e., use the moving", "should normalize the layer input by the mean and variance", "input tensor. filters : list, positive integers The number of", "= tf.keras.layers.MaxPooling2D((3,3), (2,2), padding='same')(x) # stage 2 x = conv_block(", "the mean and variance over the current batch. The default", "stage=2, block='c', train_bn=train_bn) # stage 3 x = conv_block(x, [128,128,512],", "x = tf.keras.layers.Activation('relu')(x) C1 = x = tf.keras.layers.MaxPooling2D((3,3), (2,2), padding='same')(x)", "# adapted from # https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py import tensorflow as tf def", "str(stage) + block + '_branch' bn_prefix = 'bn' + str(stage)", "= filters conv_prefix = 'res' + str(stage) + block +", "for i in range(num_blocks): x = identity_block( x, [256,256,1024], stage=4,", "layer input by the mean and variance over the current", "strides : tuple, integers, optional The conv layer strides. The", "is False, i.e., use the moving average of mean and", "the layer input. 
Returns ------- output_tensor : tf tensor, [batch_size,", "[512,512,2048], stage=5, block='a', train_bn=train_bn) x = identity_block( x, [512,512,2048], stage=5,", "2 x = conv_block( x, [64,64,256], stage=2, block='a', strides=(1,1), train_bn=train_bn)", "{'resnet50':5, 'resnet101':22}[architecture] for i in range(num_blocks): x = identity_block( x,", "= conv_block(x, [512,512,2048], stage=5, block='a', train_bn=train_bn) x = identity_block( x,", "moving average of mean and variance to normalize the layer", "num_filters_2, (3,3), padding='same', name=conv_prefix + '2b')(x) x = tf.keras.layers.BatchNormalization( name=bn_prefix", "strides=(2, 2), train_bn=False ): \"\"\" Builds a projection shortcut in", "[256,256,1024], stage=4, block=chr(98+i), train_bn=train_bn) C4 = x # stage 5", "tensor. architecture : string The ResNet architecture in {'resnet50', 'resnet101'}.", "(2,2), name='conv1')(x) x = tf.keras.layers.BatchNormalization(name='bn_conv1')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) C1", "over the current batch. The default is False, i.e., use", "C4 = x # stage 5 if stage5: x =", "layer names. block : string A lowercase letter, used for", "output_tensor def conv_block( input_tensor, filters, stage, block, strides=(2, 2), train_bn=False", "names. strides : tuple, integers, optional The conv layer strides.", "train_bn=False ): \"\"\" Builds a projection shortcut in a bottleneck", "network. The default is True. train_bn : boolean, optional Whether", "'resnet101':22}[architecture] for i in range(num_blocks): x = identity_block( x, [256,256,1024],", "tf.keras.layers.BatchNormalization( name=bn_prefix + '2b')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) x =", "normalize the layer input. Returns ------- output_tensor : tf tensor", "name='conv1')(x) x = tf.keras.layers.BatchNormalization(name='bn_conv1')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) C1 =", "block='a', strides=(1,1), train_bn=train_bn) x = identity_block(x, [64,64,256], stage=2, block='b', train_bn=train_bn)", "filters, stage, block, train_bn=False ): \"\"\" Builds an identity shortcut", "= 'bn' + str(stage) + block + '_branch' x =", ": string The ResNet architecture in {'resnet50', 'resnet101'}. stage5 :", "'_branch' bn_prefix = 'bn' + str(stage) + block + '_branch'", "padding='same')(x) # stage 2 x = conv_block( x, [64,64,256], stage=2,", "'relu', name='res' + str(stage) + block + '_out')(x) return output_tensor", "train_bn=train_bn) C4 = x # stage 5 if stage5: x", "and variance to normalize the layer input. Returns ------- outputs", "block='d', train_bn=train_bn) # stage 4 x = conv_block(x, [256,256,1024], stage=4,", "= tf.keras.layers.Conv2D( num_filters_1, (1,1), name=conv_prefix + '2a')(input_tensor) x = tf.keras.layers.BatchNormalization(", "default is True. train_bn : boolean, optional Whether one should", "else: C5 = None return [C1, C2, C3, C4, C5]", "tf.keras.layers.Conv2D( num_filters_3, (1,1), strides, name=conv_prefix + '1')(input_tensor) shortcut = tf.keras.layers.BatchNormalization(", "num_filters_1, (1,1), strides, name=conv_prefix + '2a')(input_tensor) x = tf.keras.layers.BatchNormalization( name=bn_prefix", "+ str(stage) + block + '_branch' x = tf.keras.layers.Conv2D( num_filters_1,", "list, positive integers The number of filters in 3 conv", "mean and variance over the current batch. 
The default is", "tf.keras.layers.Conv2D( num_filters_1, (1,1), strides, name=conv_prefix + '2a')(input_tensor) x = tf.keras.layers.BatchNormalization(", "boolean, optional Whether create stage5 of network. The default is", "x = tf.keras.layers.Add()([input_tensor, x]) output_tensor = tf.keras.layers.Activation( 'relu', name='res' +", "backbone_resnet(input_image, architecture, stage5=True, train_bn=False): \"\"\" Builds a backbone ResNet. Parameters", "tf.keras.layers.MaxPooling2D((3,3), (2,2), padding='same')(x) # stage 2 x = conv_block( x,", "tf.keras.layers.ZeroPadding2D((3,3))(input_image) x = tf.keras.layers.Conv2D(64, (7,7), (2,2), name='conv1')(x) x = tf.keras.layers.BatchNormalization(name='bn_conv1')(x,", "train_bn=train_bn) x = identity_block(x, [128,128,512], stage=3, block='c', train_bn=train_bn) C3 =", "+ '2b')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D( num_filters_3,", "= {'resnet50':5, 'resnet101':22}[architecture] for i in range(num_blocks): x = identity_block(", "height, width, channels] The output tensor same shape as input_tensor.", "= tf.keras.layers.Conv2D( num_filters_3, (1,1), name=conv_prefix + '2c')(x) x = tf.keras.layers.BatchNormalization(", "(7,7), (2,2), name='conv1')(x) x = tf.keras.layers.BatchNormalization(name='bn_conv1')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x)", "string A lowercase letter, used for generating layer names. strides", "optional The conv layer strides. The default is (2, 2).", "(2,2), padding='same')(x) # stage 2 x = conv_block( x, [64,64,256],", "in range(num_blocks): x = identity_block( x, [256,256,1024], stage=4, block=chr(98+i), train_bn=train_bn)", "The default is True. train_bn : boolean, optional Whether one", "= x = identity_block( x, [128,128,512], stage=3, block='d', train_bn=train_bn) #", "number of filters in 3 conv layers at the main", "train_bn : boolean, optional Whether one should normalize the layer", "height, width, channels] An input tensor. architecture : string The", "stage 1 x = tf.keras.layers.ZeroPadding2D((3,3))(input_image) x = tf.keras.layers.Conv2D(64, (7,7), (2,2),", "if stage5: x = conv_block(x, [512,512,2048], stage=5, block='a', train_bn=train_bn) x", "layer strides. The default is (2, 2). train_bn : boolean,", "generating layer names. strides : tuple, integers, optional The conv", "identity_block( input_tensor, filters, stage, block, train_bn=False ): \"\"\" Builds an", "tf tensor [batch_size, height//strides, width//strides, num_filters_3] where num_filters_3 is the", "in a bottleneck block of a ResNet. Parameters ---------- input_tensor", "stage 4 x = conv_block(x, [256,256,1024], stage=4, block='a', train_bn=train_bn) num_blocks", "name='res' + str(stage) + block + '_out')(x) return output_tensor def", "conv_block( input_tensor, filters, stage, block, strides=(2, 2), train_bn=False ): \"\"\"", "= tf.keras.layers.Conv2D( num_filters_1, (1,1), strides, name=conv_prefix + '2a')(input_tensor) x =", "[batch_size, height, width, channels] An input tensor. filters : list,", "input. Returns ------- outputs : list Feature maps at each", "tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D( num_filters_3, (1,1), name=conv_prefix + '2c')(x) x", "as tf def identity_block( input_tensor, filters, stage, block, train_bn=False ):", "stage=3, block='a', train_bn=train_bn) x = identity_block(x, [128,128,512], stage=3, block='b', train_bn=train_bn)", "names. 
train_bn : boolean, optional Whether one should normalize the", "output_tensor def backbone_resnet(input_image, architecture, stage5=True, train_bn=False): \"\"\" Builds a backbone", "'_out')(x) return output_tensor def backbone_resnet(input_image, architecture, stage5=True, train_bn=False): \"\"\" Builds", "import tensorflow as tf def identity_block( input_tensor, filters, stage, block,", "name=conv_prefix + '2a')(input_tensor) x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2a')(x, training=train_bn)", "stage=3, block='b', train_bn=train_bn) x = identity_block(x, [128,128,512], stage=3, block='c', train_bn=train_bn)", "train_bn=train_bn) # stage 4 x = conv_block(x, [256,256,1024], stage=4, block='a',", "strides, name=conv_prefix + '2a')(input_tensor) x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2a')(x,", "<filename>model/resnet.py \"\"\" Residual Networks (ResNet) \"\"\" # adapted from #", "height, width, channels] An input tensor. filters : list, positive", "integers The number of filters in 3 conv layers at", "batch. The default is False, i.e., use the moving average", "block='a', train_bn=train_bn) x = identity_block(x, [128,128,512], stage=3, block='b', train_bn=train_bn) x", "A lowercase letter, used for generating layer names. strides :", "tuple, integers, optional The conv layer strides. The default is", "the current batch. The default is False, i.e., use the", "'resnet101'], \\ 'Only support ResNet50\\101' # stage 1 x =", "= tf.keras.layers.Conv2D(64, (7,7), (2,2), name='conv1')(x) x = tf.keras.layers.BatchNormalization(name='bn_conv1')(x, training=train_bn) x", "[64,64,256], stage=2, block='b', train_bn=train_bn) C2 = x = identity_block( x,", "height//strides, width//strides, num_filters_3] where num_filters_3 is the last number in", "x, [256,256,1024], stage=4, block=chr(98+i), train_bn=train_bn) C4 = x # stage", "width, channels] An input tensor. filters : list, positive integers", ": integer A number in [2,5] used for generating layer", "1 x = tf.keras.layers.ZeroPadding2D((3,3))(input_image) x = tf.keras.layers.Conv2D(64, (7,7), (2,2), name='conv1')(x)", "Residual Networks (ResNet) \"\"\" # adapted from # https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py import", "num_filters_1, num_filters_2, num_filters_3 = filters conv_prefix = 'res' + str(stage)", "i.e., use the moving average of mean and variance to", "last number is equal to input_tensor's channels. stage : integer", "# stage 1 x = tf.keras.layers.ZeroPadding2D((3,3))(input_image) x = tf.keras.layers.Conv2D(64, (7,7),", "+ block + '_branch' bn_prefix = 'bn' + str(stage) +", "tensorflow as tf def identity_block( input_tensor, filters, stage, block, train_bn=False", "in [2,5] used for generating layer names. 
block : string", "+ '2c')(x) x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2c')(x, training=train_bn) shortcut", "'res' + str(stage) + block + '_branch' bn_prefix = 'bn'", "identity_block(x, [128,128,512], stage=3, block='c', train_bn=train_bn) C3 = x = identity_block(", "+ str(stage) + block + '_branch' bn_prefix = 'bn' +", "\"\"\" Builds a projection shortcut in a bottleneck block of", "output_tensor : tf tensor, [batch_size, height, width, channels] The output", ": list, positive integers The number of filters in 3", "training=train_bn) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D( num_filters_2, (3,3), padding='same',", "x = tf.keras.layers.BatchNormalization(name='bn_conv1')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) C1 = x", "+ '1')(input_tensor) shortcut = tf.keras.layers.BatchNormalization( name=bn_prefix + '1')(shortcut, training=train_bn) x", "= tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D( num_filters_2, (3,3), padding='same', name=conv_prefix +", "identity_block( x, [128,128,512], stage=3, block='d', train_bn=train_bn) # stage 4 x", "stage, block, strides=(2, 2), train_bn=False ): \"\"\" Builds a projection", "x = conv_block(x, [512,512,2048], stage=5, block='a', train_bn=train_bn) x = identity_block(", "C5 = x = identity_block( x, [512,512,2048], stage=5, block='c', train_bn=train_bn)", "where last number is equal to input_tensor's channels. stage :", "tensor, [batch_size, height, width, channels] An input tensor. filters :", "width, channels] The output tensor same shape as input_tensor. \"\"\"", "layer names. train_bn : boolean, optional Whether one should normalize", "A number in [2,5] used for generating layer names. block", "positive integers The number of filters in 3 conv layers", "x = tf.keras.layers.Conv2D(64, (7,7), (2,2), name='conv1')(x) x = tf.keras.layers.BatchNormalization(name='bn_conv1')(x, training=train_bn)", "path. stage : integer A number in [2,5] used for", "def backbone_resnet(input_image, architecture, stage5=True, train_bn=False): \"\"\" Builds a backbone ResNet.", "tf.keras.layers.BatchNormalization( name=bn_prefix + '1')(shortcut, training=train_bn) x = tf.keras.layers.Add()([shortcut, x]) output_tensor", "= tf.keras.layers.ZeroPadding2D((3,3))(input_image) x = tf.keras.layers.Conv2D(64, (7,7), (2,2), name='conv1')(x) x =", "backbone ResNet. Parameters ---------- input_image : tf tensor, [batch_size, height,", "def identity_block( input_tensor, filters, stage, block, train_bn=False ): \"\"\" Builds", "in {'resnet50', 'resnet101'}. stage5 : boolean, optional Whether create stage5", "for generating layer names. block : string A lowercase letter,", "ResNet. Parameters ---------- input_tensor : tf tensor, [batch_size, height, width,", "num_filters_2, num_filters_3 = filters conv_prefix = 'res' + str(stage) +", "identity_block( x, [512,512,2048], stage=5, block='b', train_bn=train_bn) C5 = x =", "= identity_block( x, [512,512,2048], stage=5, block='b', train_bn=train_bn) C5 = x", "+ str(stage) + block + '_out')(x) return output_tensor def conv_block(", "lowercase letter, used for generating layer names. train_bn : boolean,", "'resnet101'}. 
stage5 : boolean, optional Whether create stage5 of network.", "str(stage) + block + '_out')(x) return output_tensor def conv_block( input_tensor,", "train_bn=train_bn) x = identity_block(x, [128,128,512], stage=3, block='b', train_bn=train_bn) x =", "x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2c')(x, training=train_bn) x = tf.keras.layers.Add()([input_tensor,", "x, [64,64,256], stage=2, block='c', train_bn=train_bn) # stage 3 x =", "tensor [batch_size, height//strides, width//strides, num_filters_3] where num_filters_3 is the last", "in 3 conv layers at the main path. stage :", "Parameters ---------- input_tensor : tf tensor, [batch_size, height, width, channels]", "use the moving average of mean and variance to normalize", "the last number in filters, the output tensor. \"\"\" num_filters_1,", "shortcut in a bottleneck block of a ResNet. Parameters ----------", "main path. stage : integer A number in [2,5] used", "conv_block(x, [128,128,512], stage=3, block='a', train_bn=train_bn) x = identity_block(x, [128,128,512], stage=3,", ": string A lowercase letter, used for generating layer names.", "x = identity_block(x, [128,128,512], stage=3, block='c', train_bn=train_bn) C3 = x", "+ '_branch' x = tf.keras.layers.Conv2D( num_filters_1, (1,1), name=conv_prefix + '2a')(input_tensor)", "used for generating layer names. strides : tuple, integers, optional", "architecture : string The ResNet architecture in {'resnet50', 'resnet101'}. stage5", "= x = identity_block( x, [64,64,256], stage=2, block='c', train_bn=train_bn) #", "input_tensor, filters, stage, block, strides=(2, 2), train_bn=False ): \"\"\" Builds", "A lowercase letter, used for generating layer names. train_bn :", "tf.keras.layers.Conv2D( num_filters_2, (3,3), padding='same', name=conv_prefix + '2b')(x) x = tf.keras.layers.BatchNormalization(", "block=chr(98+i), train_bn=train_bn) C4 = x # stage 5 if stage5:", "True. train_bn : boolean, optional Whether one should normalize the", "layers at the main path, where last number is equal", "in ['resnet50', 'resnet101'], \\ 'Only support ResNet50\\101' # stage 1", "False, i.e., use the moving average of mean and variance", "input_tensor. \"\"\" num_filters_1, num_filters_2, num_filters_3 = filters conv_prefix = 'res'", "train_bn=train_bn) # stage 3 x = conv_block(x, [128,128,512], stage=3, block='a',", "'1')(input_tensor) shortcut = tf.keras.layers.BatchNormalization( name=bn_prefix + '1')(shortcut, training=train_bn) x =", "at the main path, where last number is equal to", "train_bn=train_bn) C3 = x = identity_block( x, [128,128,512], stage=3, block='d',", "name=bn_prefix + '2b')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D(", "+ '1')(shortcut, training=train_bn) x = tf.keras.layers.Add()([shortcut, x]) output_tensor = tf.keras.layers.Activation(", "to normalize the layer input. Returns ------- outputs : list", "Feature maps at each stage. \"\"\" assert architecture in ['resnet50',", "stage=5, block='b', train_bn=train_bn) C5 = x = identity_block( x, [512,512,2048],", "An input tensor. filters : list, positive integers The number", "where num_filters_3 is the last number in filters, the output", "(1,1), name=conv_prefix + '2a')(input_tensor) x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2a')(x,", "conv_prefix = 'res' + str(stage) + block + '_branch' bn_prefix", "filters in 3 conv layers at the main path. 
stage", "name=conv_prefix + '2c')(x) x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2c')(x, training=train_bn)", "block + '_branch' bn_prefix = 'bn' + str(stage) + block", "x, [64,64,256], stage=2, block='a', strides=(1,1), train_bn=train_bn) x = identity_block(x, [64,64,256],", "return output_tensor def backbone_resnet(input_image, architecture, stage5=True, train_bn=False): \"\"\" Builds a", ": boolean, optional Whether one should normalize the layer input", "boolean, optional Whether one should normalize the layer input by", "name=bn_prefix + '2c')(x, training=train_bn) shortcut = tf.keras.layers.Conv2D( num_filters_3, (1,1), strides,", "# stage 2 x = conv_block( x, [64,64,256], stage=2, block='a',", "block='b', train_bn=train_bn) C2 = x = identity_block( x, [64,64,256], stage=2,", "of mean and variance to normalize the layer input. Returns", "= 'res' + str(stage) + block + '_branch' bn_prefix =", "each stage. \"\"\" assert architecture in ['resnet50', 'resnet101'], \\ 'Only", "= x # stage 5 if stage5: x = conv_block(x,", "tf def identity_block( input_tensor, filters, stage, block, train_bn=False ): \"\"\"", "tf.keras.layers.Add()([input_tensor, x]) output_tensor = tf.keras.layers.Activation( 'relu', name='res' + str(stage) +", "[256,256,1024], stage=4, block='a', train_bn=train_bn) num_blocks = {'resnet50':5, 'resnet101':22}[architecture] for i", "the layer input. Returns ------- output_tensor : tf tensor [batch_size,", "output tensor same shape as input_tensor. \"\"\" num_filters_1, num_filters_2, num_filters_3", "layer names. strides : tuple, integers, optional The conv layer", "conv_block( x, [64,64,256], stage=2, block='a', strides=(1,1), train_bn=train_bn) x = identity_block(x,", "bottleneck block of a ResNet. Parameters ---------- input_tensor : tf", "The default is False, i.e., use the moving average of", "training=train_bn) x = tf.keras.layers.Activation('relu')(x) C1 = x = tf.keras.layers.MaxPooling2D((3,3), (2,2),", "block + '_branch' x = tf.keras.layers.Conv2D( num_filters_1, (1,1), strides, name=conv_prefix", "layer input. Returns ------- outputs : list Feature maps at", "name=bn_prefix + '2a')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D(", "[128,128,512], stage=3, block='d', train_bn=train_bn) # stage 4 x = conv_block(x,", "'2b')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D( num_filters_3, (1,1),", "projection shortcut in a bottleneck block of a ResNet. Parameters", "string The ResNet architecture in {'resnet50', 'resnet101'}. stage5 : boolean,", "str(stage) + block + '_out')(x) return output_tensor def backbone_resnet(input_image, architecture,", "x = identity_block( x, [256,256,1024], stage=4, block=chr(98+i), train_bn=train_bn) C4 =", "identity shortcut in a bottleneck building block of a ResNet.", "---------- input_image : tf tensor, [batch_size, height, width, channels] An", "stage=2, block='b', train_bn=train_bn) C2 = x = identity_block( x, [64,64,256],", "3 conv layers at the main path. stage : integer", "identity_block(x, [128,128,512], stage=3, block='b', train_bn=train_bn) x = identity_block(x, [128,128,512], stage=3,", "stage5=True, train_bn=False): \"\"\" Builds a backbone ResNet. Parameters ---------- input_image", "x = tf.keras.layers.Conv2D( num_filters_3, (1,1), name=conv_prefix + '2c')(x) x =", "[2,5] used for generating layer names. 
block : string A", "= identity_block(x, [64,64,256], stage=2, block='b', train_bn=train_bn) C2 = x =", "integers, optional The conv layer strides. The default is (2,", "5 if stage5: x = conv_block(x, [512,512,2048], stage=5, block='a', train_bn=train_bn)", "+ '2c')(x, training=train_bn) x = tf.keras.layers.Add()([input_tensor, x]) output_tensor = tf.keras.layers.Activation(", "input tensor. architecture : string The ResNet architecture in {'resnet50',", "+ '_branch' x = tf.keras.layers.Conv2D( num_filters_1, (1,1), strides, name=conv_prefix +", "---------- input_tensor : tf tensor, [batch_size, height, width, channels] An", "= tf.keras.layers.BatchNormalization( name=bn_prefix + '2b')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) x", "block : string A lowercase letter, used for generating layer", "names. block : string A lowercase letter, used for generating", "(ResNet) \"\"\" # adapted from # https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py import tensorflow as", "[512,512,2048], stage=5, block='c', train_bn=train_bn) else: C5 = None return [C1,", "input. Returns ------- output_tensor : tf tensor [batch_size, height//strides, width//strides,", "default is False, i.e., use the moving average of mean", "\\ 'Only support ResNet50\\101' # stage 1 x = tf.keras.layers.ZeroPadding2D((3,3))(input_image)", "outputs : list Feature maps at each stage. \"\"\" assert", "width//strides, num_filters_3] where num_filters_3 is the last number in filters,", "number is equal to input_tensor's channels. stage : integer A", "one should normalize the layer input by the mean and", "x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2c')(x, training=train_bn) shortcut = tf.keras.layers.Conv2D(", "stage. \"\"\" assert architecture in ['resnet50', 'resnet101'], \\ 'Only support", "x = tf.keras.layers.ZeroPadding2D((3,3))(input_image) x = tf.keras.layers.Conv2D(64, (7,7), (2,2), name='conv1')(x) x", "number in filters, the output tensor. \"\"\" num_filters_1, num_filters_2, num_filters_3", "num_filters_3 is the last number in filters, the output tensor.", "default is (2, 2). train_bn : boolean, optional Whether one", "support ResNet50\\101' # stage 1 x = tf.keras.layers.ZeroPadding2D((3,3))(input_image) x =", "stage 5 if stage5: x = conv_block(x, [512,512,2048], stage=5, block='a',", "path, where last number is equal to input_tensor's channels. stage", "+ block + '_branch' x = tf.keras.layers.Conv2D( num_filters_1, (1,1), strides,", "shortcut = tf.keras.layers.BatchNormalization( name=bn_prefix + '1')(shortcut, training=train_bn) x = tf.keras.layers.Add()([shortcut,", "= conv_block(x, [256,256,1024], stage=4, block='a', train_bn=train_bn) num_blocks = {'resnet50':5, 'resnet101':22}[architecture]", "= identity_block( x, [64,64,256], stage=2, block='c', train_bn=train_bn) # stage 3", "a bottleneck block of a ResNet. Parameters ---------- input_tensor :", "training=train_bn) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D( num_filters_3, (1,1), name=conv_prefix", "= tf.keras.layers.BatchNormalization( name=bn_prefix + '2c')(x, training=train_bn) shortcut = tf.keras.layers.Conv2D( num_filters_3,", "tf tensor, [batch_size, height, width, channels] An input tensor. 
architecture", "= identity_block( x, [128,128,512], stage=3, block='d', train_bn=train_bn) # stage 4", "range(num_blocks): x = identity_block( x, [256,256,1024], stage=4, block=chr(98+i), train_bn=train_bn) C4", "main path, where last number is equal to input_tensor's channels.", "is equal to input_tensor's channels. stage : integer A number", "conv layer strides. The default is (2, 2). train_bn :", "+ '2c')(x) x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2c')(x, training=train_bn) x", "tf.keras.layers.Conv2D( num_filters_3, (1,1), name=conv_prefix + '2c')(x) x = tf.keras.layers.BatchNormalization( name=bn_prefix", "x = tf.keras.layers.Conv2D( num_filters_2, (3,3), padding='same', name=conv_prefix + '2b')(x) x", "normalize the layer input. Returns ------- outputs : list Feature", "x, [512,512,2048], stage=5, block='b', train_bn=train_bn) C5 = x = identity_block(", "bottleneck building block of a ResNet. Parameters ---------- input_tensor :", "+ block + '_out')(x) return output_tensor def backbone_resnet(input_image, architecture, stage5=True,", "[batch_size, height//strides, width//strides, num_filters_3] where num_filters_3 is the last number", "'2c')(x, training=train_bn) x = tf.keras.layers.Add()([input_tensor, x]) output_tensor = tf.keras.layers.Activation( 'relu',", "'2a')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D( num_filters_2, (3,3),", "3 conv layers at the main path, where last number", "the output tensor. \"\"\" num_filters_1, num_filters_2, num_filters_3 = filters conv_prefix", "filters conv_prefix = 'res' + str(stage) + block + '_branch'", "ResNet. Parameters ---------- input_image : tf tensor, [batch_size, height, width,", "train_bn=train_bn) C5 = x = identity_block( x, [512,512,2048], stage=5, block='c',", "stage=4, block=chr(98+i), train_bn=train_bn) C4 = x # stage 5 if", "variance over the current batch. The default is False, i.e.,", "https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py import tensorflow as tf def identity_block( input_tensor, filters, stage,", "\"\"\" # adapted from # https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py import tensorflow as tf", "number in [2,5] used for generating layer names. block :", "The default is (2, 2). train_bn : boolean, optional Whether", ": boolean, optional Whether create stage5 of network. The default", "stage=5, block='c', train_bn=train_bn) else: C5 = None return [C1, C2,", "layer input. Returns ------- output_tensor : tf tensor [batch_size, height//strides,", "a ResNet. Parameters ---------- input_tensor : tf tensor, [batch_size, height,", "name=bn_prefix + '2c')(x, training=train_bn) x = tf.keras.layers.Add()([input_tensor, x]) output_tensor =", "tf.keras.layers.BatchNormalization( name=bn_prefix + '2c')(x, training=train_bn) x = tf.keras.layers.Add()([input_tensor, x]) output_tensor", "block, strides=(2, 2), train_bn=False ): \"\"\" Builds a projection shortcut", "shape as input_tensor. \"\"\" num_filters_1, num_filters_2, num_filters_3 = filters conv_prefix", "[128,128,512], stage=3, block='c', train_bn=train_bn) C3 = x = identity_block( x,", "+ '2b')(x) x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2b')(x, training=train_bn) x", "block='c', train_bn=train_bn) # stage 3 x = conv_block(x, [128,128,512], stage=3,", "Returns ------- output_tensor : tf tensor [batch_size, height//strides, width//strides, num_filters_3]", ": tuple, integers, optional The conv layer strides. 
The default", "input by the mean and variance over the current batch.", "# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py import tensorflow as tf def identity_block( input_tensor, filters,", "used for generating layer names. train_bn : boolean, optional Whether", "the main path, where last number is equal to input_tensor's", "name=conv_prefix + '1')(input_tensor) shortcut = tf.keras.layers.BatchNormalization( name=bn_prefix + '1')(shortcut, training=train_bn)", "'_branch' x = tf.keras.layers.Conv2D( num_filters_1, (1,1), name=conv_prefix + '2a')(input_tensor) x", "by the mean and variance over the current batch. The", "training=train_bn) x = tf.keras.layers.Add()([input_tensor, x]) output_tensor = tf.keras.layers.Activation( 'relu', name='res'", "conv layers at the main path, where last number is", "input_tensor, filters, stage, block, train_bn=False ): \"\"\" Builds an identity", "stage 2 x = conv_block( x, [64,64,256], stage=2, block='a', strides=(1,1),", "training=train_bn) x = tf.keras.layers.Add()([shortcut, x]) output_tensor = tf.keras.layers.Activation( 'relu', name='res'", "x = conv_block(x, [256,256,1024], stage=4, block='a', train_bn=train_bn) num_blocks = {'resnet50':5,", "the main path. stage : integer A number in [2,5]", ": tf tensor, [batch_size, height, width, channels] An input tensor.", "stage : integer A number in [2,5] used for generating", "variance to normalize the layer input. Returns ------- outputs :", "integer A number in [2,5] used for generating layer names.", "last number in filters, the output tensor. \"\"\" num_filters_1, num_filters_2,", "(1,1), strides, name=conv_prefix + '1')(input_tensor) shortcut = tf.keras.layers.BatchNormalization( name=bn_prefix +", "stage=2, block='a', strides=(1,1), train_bn=train_bn) x = identity_block(x, [64,64,256], stage=2, block='b',", "identity_block( x, [64,64,256], stage=2, block='c', train_bn=train_bn) # stage 3 x", "the layer input by the mean and variance over the", "letter, used for generating layer names. train_bn : boolean, optional", "input_image : tf tensor, [batch_size, height, width, channels] An input", "num_filters_1, (1,1), name=conv_prefix + '2a')(input_tensor) x = tf.keras.layers.BatchNormalization( name=bn_prefix +", "x = identity_block(x, [64,64,256], stage=2, block='b', train_bn=train_bn) C2 = x", "optional Whether create stage5 of network. The default is True.", "tf tensor, [batch_size, height, width, channels] The output tensor same", "block='a', train_bn=train_bn) x = identity_block( x, [512,512,2048], stage=5, block='b', train_bn=train_bn)", "tensor same shape as input_tensor. \"\"\" num_filters_1, num_filters_2, num_filters_3 =", "+ block + '_branch' x = tf.keras.layers.Conv2D( num_filters_1, (1,1), name=conv_prefix", ": tf tensor [batch_size, height//strides, width//strides, num_filters_3] where num_filters_3 is", "shortcut in a bottleneck building block of a ResNet. 
Parameters", "[64,64,256], stage=2, block='a', strides=(1,1), train_bn=train_bn) x = identity_block(x, [64,64,256], stage=2,", "= conv_block(x, [128,128,512], stage=3, block='a', train_bn=train_bn) x = identity_block(x, [128,128,512],", "from # https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py import tensorflow as tf def identity_block( input_tensor,", "i in range(num_blocks): x = identity_block( x, [256,256,1024], stage=4, block=chr(98+i),", "tf.keras.layers.Conv2D(64, (7,7), (2,2), name='conv1')(x) x = tf.keras.layers.BatchNormalization(name='bn_conv1')(x, training=train_bn) x =", "Builds a backbone ResNet. Parameters ---------- input_image : tf tensor,", "[128,128,512], stage=3, block='a', train_bn=train_bn) x = identity_block(x, [128,128,512], stage=3, block='b',", "ResNet50\\101' # stage 1 x = tf.keras.layers.ZeroPadding2D((3,3))(input_image) x = tf.keras.layers.Conv2D(64,", "= tf.keras.layers.Add()([shortcut, x]) output_tensor = tf.keras.layers.Activation( 'relu', name='res' + str(stage)", "block of a ResNet. Parameters ---------- input_tensor : tf tensor,", "a backbone ResNet. Parameters ---------- input_image : tf tensor, [batch_size,", "filters in 3 conv layers at the main path, where", "stage 3 x = conv_block(x, [128,128,512], stage=3, block='a', train_bn=train_bn) x", "x = conv_block(x, [128,128,512], stage=3, block='a', train_bn=train_bn) x = identity_block(x,", "stage5 of network. The default is True. train_bn : boolean,", "[batch_size, height, width, channels] The output tensor same shape as", "train_bn=False): \"\"\" Builds a backbone ResNet. Parameters ---------- input_image :", "2), train_bn=False ): \"\"\" Builds a projection shortcut in a", "'_out')(x) return output_tensor def conv_block( input_tensor, filters, stage, block, strides=(2,", ": tf tensor, [batch_size, height, width, channels] The output tensor", "+ block + '_out')(x) return output_tensor def conv_block( input_tensor, filters,", "name=bn_prefix + '1')(shortcut, training=train_bn) x = tf.keras.layers.Add()([shortcut, x]) output_tensor =", "architecture in ['resnet50', 'resnet101'], \\ 'Only support ResNet50\\101' # stage", "lowercase letter, used for generating layer names. strides : tuple,", "tensor. \"\"\" num_filters_1, num_filters_2, num_filters_3 = filters conv_prefix = 'res'", "layer input. Returns ------- output_tensor : tf tensor, [batch_size, height,", "shortcut = tf.keras.layers.Conv2D( num_filters_3, (1,1), strides, name=conv_prefix + '1')(input_tensor) shortcut", "for generating layer names. train_bn : boolean, optional Whether one", "num_filters_3] where num_filters_3 is the last number in filters, the", "tf.keras.layers.BatchNormalization( name=bn_prefix + '2c')(x, training=train_bn) shortcut = tf.keras.layers.Conv2D( num_filters_3, (1,1),", "strides, name=conv_prefix + '1')(input_tensor) shortcut = tf.keras.layers.BatchNormalization( name=bn_prefix + '1')(shortcut,", "= tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D( num_filters_3, (1,1), name=conv_prefix + '2c')(x)", "width, channels] An input tensor. architecture : string The ResNet", "output tensor. \"\"\" num_filters_1, num_filters_2, num_filters_3 = filters conv_prefix =", "x, [128,128,512], stage=3, block='d', train_bn=train_bn) # stage 4 x =", "[512,512,2048], stage=5, block='b', train_bn=train_bn) C5 = x = identity_block( x,", "num_blocks = {'resnet50':5, 'resnet101':22}[architecture] for i in range(num_blocks): x =", "input. 
def backbone_resnet(input_image, architecture, stage5=True, train_bn=False):
    """
    Builds a backbone ResNet.

    Parameters
    ----------
    input_image : tf tensor, [batch_size, height, width, channels]
        An input tensor.
    architecture : string
        The ResNet architecture in {'resnet50', 'resnet101'}.
    stage5 : boolean, optional
        Whether to create stage 5 of the network. The default is True.
    train_bn : boolean, optional
        Whether to normalize the layer input by the mean and variance over the
        current batch. The default is False, i.e., use the moving average of
        mean and variance to normalize the layer input.

    Returns
    -------
    outputs : list
        Feature maps at each stage.
    """
    assert architecture in ['resnet50', 'resnet101'], \
        'Only support ResNet50\\101'
    # stage 1
    x = tf.keras.layers.ZeroPadding2D((3,3))(input_image)
    x = tf.keras.layers.Conv2D(64, (7,7), (2,2), name='conv1')(x)
    x = tf.keras.layers.BatchNormalization(name='bn_conv1')(x, training=train_bn)
    x = tf.keras.layers.Activation('relu')(x)
    C1 = x = tf.keras.layers.MaxPooling2D((3,3), (2,2), padding='same')(x)
    # stage 2
    x = conv_block(
        x, [64,64,256], stage=2, block='a', strides=(1,1), train_bn=train_bn)
    x = identity_block(x, [64,64,256], stage=2, block='b', train_bn=train_bn)
    C2 = x = identity_block(
        x, [64,64,256], stage=2, block='c', train_bn=train_bn)
    # stage 3
    x = conv_block(x, [128,128,512], stage=3, block='a', train_bn=train_bn)
    x = identity_block(x, [128,128,512], stage=3, block='b', train_bn=train_bn)
    x = identity_block(x, [128,128,512], stage=3, block='c', train_bn=train_bn)
    C3 = x = identity_block(
        x, [128,128,512], stage=3, block='d', train_bn=train_bn)
    # stage 4
    x = conv_block(x, [256,256,1024], stage=4, block='a', train_bn=train_bn)
    num_blocks = {'resnet50': 5, 'resnet101': 22}[architecture]
    for i in range(num_blocks):
        x = identity_block(
            x, [256,256,1024], stage=4, block=chr(98+i), train_bn=train_bn)
    C4 = x
    # stage 5
    if stage5:
        x = conv_block(x, [512,512,2048], stage=5, block='a', train_bn=train_bn)
        x = identity_block(
            x, [512,512,2048], stage=5, block='b', train_bn=train_bn)
        C5 = x = identity_block(
            x, [512,512,2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
[ "parses the fields in line to generate json structure \"\"\"", "fields in line to generate json structure \"\"\" expected_min_no_fields =", "\"\"\" expected_min_no_fields = 5 if len(line) < expected_min_no_fields: raise LineParserException('line", "LineParserException('Only able to parse times in UTC. You gave {}'.format(timezone))", "Parser.__init__(self) def compose_timestamp(self, datetime, timezone): if len(datetime) != 27: raise", "= 5 if len(line) < expected_min_no_fields: raise LineParserException('line too short')", "__init__(self): Parser.__init__(self) def compose_timestamp(self, datetime, timezone): if len(datetime) != 27:", "parse(self, line): \"\"\" parses the fields in line to generate", "gave {}'.format(timezone)) return datetime def parse(self, line): \"\"\" parses the", "able to parse times in UTC. You gave {}'.format(timezone)) return", "compose_timestamp(self, datetime, timezone): if len(datetime) != 27: raise LineParserException('wrong length", "{}'.format(timezone)) return datetime def parse(self, line): \"\"\" parses the fields", "datetime - wrong date is: ' + datetime) if not", "length of datetime - wrong date is: ' + datetime)", "return { '@timestamp': self.compose_timestamp(line[0], timezone), 'log_level': log_level, 'process_id': int(pid), 'message':", "line[2].lstrip(\"[\").rstrip(\"]\") timezone = 'UTC' return { '@timestamp': self.compose_timestamp(line[0], timezone), 'log_level':", "wrong date is: ' + datetime) if not timezone ==", "def parse(self, line): \"\"\" parses the fields in line to", "raise LineParserException('Only able to parse times in UTC. You gave", "\"\"\" parses the fields in line to generate json structure", "generate json structure \"\"\" expected_min_no_fields = 5 if len(line) <", "times in UTC. You gave {}'.format(timezone)) return datetime def parse(self,", "line): \"\"\" parses the fields in line to generate json", "' + datetime) if not timezone == 'UTC': raise LineParserException('Only", "LineParserException('wrong length of datetime - wrong date is: ' +", "'@timestamp': self.compose_timestamp(line[0], timezone), 'log_level': log_level, 'process_id': int(pid), 'message': ' '.join(map(str,", "import Parser, LineParserException class Mysql57(Parser): def __init__(self): Parser.__init__(self) def compose_timestamp(self,", "5 if len(line) < expected_min_no_fields: raise LineParserException('line too short') pid", "expected_min_no_fields: raise LineParserException('line too short') pid = line[1] log_level =", "timezone), 'log_level': log_level, 'process_id': int(pid), 'message': ' '.join(map(str, line[3:])) }", "if len(line) < expected_min_no_fields: raise LineParserException('line too short') pid =", "datetime def parse(self, line): \"\"\" parses the fields in line", "UTC. You gave {}'.format(timezone)) return datetime def parse(self, line): \"\"\"", "== 'UTC': raise LineParserException('Only able to parse times in UTC.", "timezone = 'UTC' return { '@timestamp': self.compose_timestamp(line[0], timezone), 'log_level': log_level,", "Parser, LineParserException class Mysql57(Parser): def __init__(self): Parser.__init__(self) def compose_timestamp(self, datetime,", "line[1] log_level = line[2].lstrip(\"[\").rstrip(\"]\") timezone = 'UTC' return { '@timestamp':", "datetime, timezone): if len(datetime) != 27: raise LineParserException('wrong length of", "raise LineParserException('line too short') pid = line[1] log_level = line[2].lstrip(\"[\").rstrip(\"]\")", "'UTC': raise LineParserException('Only able to parse times in UTC. 
You", "parse times in UTC. You gave {}'.format(timezone)) return datetime def", "in line to generate json structure \"\"\" expected_min_no_fields = 5", "log_level = line[2].lstrip(\"[\").rstrip(\"]\") timezone = 'UTC' return { '@timestamp': self.compose_timestamp(line[0],", "timezone == 'UTC': raise LineParserException('Only able to parse times in", "return datetime def parse(self, line): \"\"\" parses the fields in", "too short') pid = line[1] log_level = line[2].lstrip(\"[\").rstrip(\"]\") timezone =", "{ '@timestamp': self.compose_timestamp(line[0], timezone), 'log_level': log_level, 'process_id': int(pid), 'message': '", "structure \"\"\" expected_min_no_fields = 5 if len(line) < expected_min_no_fields: raise", "json structure \"\"\" expected_min_no_fields = 5 if len(line) < expected_min_no_fields:", "You gave {}'.format(timezone)) return datetime def parse(self, line): \"\"\" parses", "timezone): if len(datetime) != 27: raise LineParserException('wrong length of datetime", "not timezone == 'UTC': raise LineParserException('Only able to parse times", "is: ' + datetime) if not timezone == 'UTC': raise", "!= 27: raise LineParserException('wrong length of datetime - wrong date", "pid = line[1] log_level = line[2].lstrip(\"[\").rstrip(\"]\") timezone = 'UTC' return", "len(line) < expected_min_no_fields: raise LineParserException('line too short') pid = line[1]", "raise LineParserException('wrong length of datetime - wrong date is: '", "- wrong date is: ' + datetime) if not timezone", "of datetime - wrong date is: ' + datetime) if", "rds_log_cat.parser.parser import Parser, LineParserException class Mysql57(Parser): def __init__(self): Parser.__init__(self) def", "= line[1] log_level = line[2].lstrip(\"[\").rstrip(\"]\") timezone = 'UTC' return {", "27: raise LineParserException('wrong length of datetime - wrong date is:", "LineParserException class Mysql57(Parser): def __init__(self): Parser.__init__(self) def compose_timestamp(self, datetime, timezone):", "self.compose_timestamp(line[0], timezone), 'log_level': log_level, 'process_id': int(pid), 'message': ' '.join(map(str, line[3:]))", "LineParserException('line too short') pid = line[1] log_level = line[2].lstrip(\"[\").rstrip(\"]\") timezone", "if not timezone == 'UTC': raise LineParserException('Only able to parse", "line to generate json structure \"\"\" expected_min_no_fields = 5 if", "def __init__(self): Parser.__init__(self) def compose_timestamp(self, datetime, timezone): if len(datetime) !=", "< expected_min_no_fields: raise LineParserException('line too short') pid = line[1] log_level", "+ datetime) if not timezone == 'UTC': raise LineParserException('Only able", "len(datetime) != 27: raise LineParserException('wrong length of datetime - wrong", "if len(datetime) != 27: raise LineParserException('wrong length of datetime -", "Mysql57(Parser): def __init__(self): Parser.__init__(self) def compose_timestamp(self, datetime, timezone): if len(datetime)", "datetime) if not timezone == 'UTC': raise LineParserException('Only able to", "= 'UTC' return { '@timestamp': self.compose_timestamp(line[0], timezone), 'log_level': log_level, 'process_id':", "to parse times in UTC. 
You gave {}'.format(timezone)) return datetime", "<reponame>Scout24/rds-log-cat from rds_log_cat.parser.parser import Parser, LineParserException class Mysql57(Parser): def __init__(self):", "short') pid = line[1] log_level = line[2].lstrip(\"[\").rstrip(\"]\") timezone = 'UTC'", "= line[2].lstrip(\"[\").rstrip(\"]\") timezone = 'UTC' return { '@timestamp': self.compose_timestamp(line[0], timezone),", "to generate json structure \"\"\" expected_min_no_fields = 5 if len(line)", "'UTC' return { '@timestamp': self.compose_timestamp(line[0], timezone), 'log_level': log_level, 'process_id': int(pid),", "date is: ' + datetime) if not timezone == 'UTC':", "from rds_log_cat.parser.parser import Parser, LineParserException class Mysql57(Parser): def __init__(self): Parser.__init__(self)", "class Mysql57(Parser): def __init__(self): Parser.__init__(self) def compose_timestamp(self, datetime, timezone): if", "the fields in line to generate json structure \"\"\" expected_min_no_fields", "expected_min_no_fields = 5 if len(line) < expected_min_no_fields: raise LineParserException('line too", "in UTC. You gave {}'.format(timezone)) return datetime def parse(self, line):", "def compose_timestamp(self, datetime, timezone): if len(datetime) != 27: raise LineParserException('wrong" ]
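# A minimal usage sketch, assuming the rds-log-cat package is importable and
# that the caller splits a MySQL 5.7 error-log line on whitespace before
# handing it to parse(). The sample line below is an assumption chosen only
# to satisfy the 27-character timestamp check; it is not taken from the
# original module.
if __name__ == '__main__':
    sample = '2017-03-09T07:25:11.964746Z 0 [Note] InnoDB: Buffer pool(s) load completed'
    record = Mysql57().parse(sample.split(' '))
    # record holds '@timestamp', 'log_level', 'process_id' and 'message'
    print(record)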
[ "algorithm even when finding edit distance of 1 neighborhoods stop_check", "tierdict, tier_type = tier_type, sequence_type = sequence_type, algorithm = algorithm,", "query, sequence_type, max_distance): w_len = len(getattr(w, sequence_type)) query_len = len(getattr(query,", "+ [str(c) for c in sequence[i+1:]] # deletion for char", "the queried word \"\"\" matches = [] sequence_type = corpus_context.sequence_type", "stop_check is not None and stop_check(): return if call_back is", "corpustools.symbolsim.edit_distance import edit_distance from corpustools.symbolsim.khorsi import khorsi from corpustools.symbolsim.phono_edit_distance import", "# results[str(w)] = [getattr(r, output_format) for r in res[1]] #", "if w_len < query_len-max_distance: return False return edit_distance(getattr(w, sequence_type), getattr(query,", "= '.'.join(query) if '.' not in query else query for", ">= max_distance def neighborhood_density_all_words(corpus_context, tierdict, tier_type = None, sequence_type =", "word found in corpus with the transcription new_query = parse(query,", "can't be found in the corpus new_query = parse(query, trans_delimiter)", "= trans_delimiter.join(candidate) else: cand_str = ''.join(candidate) if cand_str in tierdict:", "not None: call_back('Finding neighbors for {}...'.format(query)) call_back(0,len(corpus_context)) cur = 0", "= sequence_type, algorithm = algorithm, max_distance = max_distance, collapse_homophones =", "[str(c) for c in sequence[:]] + [str(char)] # insertion def", "query_word = Word(**{sequence_type: new_query}) else: # if file contains spelling", ": bool Force use of the less efficient quadratic algorithm", "sequence[i+1:]] # substitution for char in corpus_context.inventory: # final pass", "or None Optional function to check whether to gracefully terminate", "Maximum edit distance from the queried word to consider a", "# call_back(cur) # res = function(w) # results[str(w)] = [getattr(r,", "not None and stop_check(): return if last_value_removed: tierdict[last_key_removed].append(last_value_removed) w_sequence =", "cur = 0 if algorithm == 'edit_distance' and max_distance ==", "'tier': cand_str = trans_delimiter.join(candidate) else: cand_str = ''.join(candidate) if cand_str", "any(getattr(m, sequence_type) == w_sequence for m in matches): continue else:", "algorithm : str The algorithm used to determine distance max_distance", "last_value_removed = None last_key_removed = None if num_cores == -1", "'') query_word = Word(**{sequence_type: list(query_word)}) elif tier_type.att_type == 'tier': if", "= False): \"\"\"Generates all neighbors of edit distance <= 1", "the file can't be found in the corpus new_query =", "optional Optional function to supply progress information during the function", "if stop_check is not None and stop_check(): return if last_value_removed:", "corpus_context, tierdict = tierdict, tier_type = tier_type, sequence_type = sequence_type,", "that word in the corpus is to be referred to.", "= max_distance) for w in corpus_context: if stop_check is not", "manager for a corpus algorithm : str The algorithm used", "in sequence[i+1:]] # substitution for char in corpus_context.inventory: # final", "of words in corpus m: length of query s: size", "corpus and adds them as attributes of the words. 
Parameters", "in res[1]] # setattr(w.original, settable_attr.name, res[0]-1) # #the -1 is", "sequence[i]]: yield [str(c) for c in sequence[:]] + [str(char)] #", "\"\"\" matches = [] query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)", ": CorpusContext Context manager for a corpus algorithm : str", "c in sequence] for i in range(len(sequence)): yield [str(c) for", "whether to gracefully terminate early call_back : callable, optional Optional", "in corpus_context.inventory: # final pass to get insertion at len+1", "for r in res[1]] setattr(w.original, settable_attr.name, res[0]) # for w", "'edit_distance': is_neighbor = partial(_is_edit_distance_neighbor, sequence_type = corpus_context.sequence_type, max_distance = max_distance)", "of edit distance <= 1 and searches for them in", "function(w) # results[str(w)] = res[1]#[str(r) for r in res[1]] #", "stop_check, chunk_size= 1) for n in neighbors: #Have to look", "attribute that neighbourhood density results will be assigned to \"\"\"", "all words in the corpus and adds them as attributes", "tier_type, file_type=file_type) for candidate in generate_neighbor_candidates(corpus_context, query, sequence_type): if tier_type.att_type", "max_distance) <= max_distance def _is_phono_edit_distance_neighbor(w, query, sequence_type, specifier, max_distance): return", "tier_type, trans_delimiter='.', file_type=None): if isinstance(query, Word): query_word = query else:", "entry in corpus: corpus_word_with_td = str(getattr(entry, sequence_type)) if query_with̠td ==", "finding edit distance of 1 neighborhoods stop_check : callable, optional", "sequence_type: query_with̠td = '.'.join(query) if '.' not in query else", "sequence_type), sequence_type, specifier) <= max_distance def _is_khorsi_neighbor(w, query, freq_base, sequence_type,", "is not None: call_back('Calculating neighborhood densities...') call_back(0,len(corpus_context)) cur = 0", "Word The word whose neighborhood density to calculate. algorithm :", "to supply progress information during the function Returns ------- tuple(int,", "== sequence_type: query_word = Word(**{sequence_type: list(query)}) else: query_word = query.replace(trans_delimiter,", "from functools import partial from corpustools.corpus.classes import Word from corpustools.symbolsim.edit_distance", "is not None and stop_check(): return if call_back is not", "consider a word a neighbor. stop_check : callable, optional Optional", "in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i:]]", "adds them as attributes of the words. Parameters ---------- corpus_context", "neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size= 1) for", "and max_distance == 1 and not force_quadratic: return fast_neighborhood_density(corpus_context, query,", "generate_neighbor_candidates(corpus_context, query, sequence_type): sequence = getattr(query, sequence_type) yield [str(c) for", "return # cur += 1 # call_back(cur) # res =", "as attributes of the words. Parameters ---------- corpus_context : CorpusContext", "= max_distance) elif algorithm == 'khorsi': freq_base = corpus_context.get_frequency_base() is_neighbor", "def generate_neighbor_candidates(corpus_context, query, sequence_type): sequence = getattr(query, sequence_type) yield [str(c)", "= None): function = partial(find_mutation_minpairs, corpus_context, tier_type=tier_type, collapse_homophones = collapse_homophones)", "(1 + s), where n: number of words in corpus", "the queried word to consider a word a neighbor. 
stop_check", "a corpus query : Word The word whose minimal pairs", "return results def neighborhood_density(corpus_context, query, tierdict, algorithm = 'edit_distance', max_distance", "= False, stop_check = None, call_back = None): \"\"\"Find all", "call_back is not None: call_back('Finding neighbors for {}...'.format(query)) call_back(0,len(corpus_context)) cur", "<= 1 and searches for them in corpus_context. Will be", "Words. \"\"\" matches = [] query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type,", "# the following should be run if no word found", "= len(getattr(query, sequence_type)) if w_len > query_len+max_distance: return False if", "based only on segment mutations (not deletions/insertions) Parameters ---------- corpus_context", "query_word = query else: if tier_type.att_type == 'spelling': if file_type", "None: call_back('Calculating neighborhood densities...') call_back(0,len(corpus_context)) cur = 0 results =", "_is_edit_distance_neighbor(w, query, sequence_type, max_distance): w_len = len(getattr(w, sequence_type)) query_len =", "query_word = corpus.corpus.find(query) except KeyError: # if the word in", "= tierdict[last_key_removed].pop(i) break res = neighborhood_density(corpus_context, w, tierdict, tier_type =", "in tierdict[cand_str]: w_sequence = getattr(w, sequence_type) if collapse_homophones and any(getattr(word,", "corpus_context.sequence_type, max_distance = max_distance) for w in corpus_context: if stop_check", "easier than fixing the neighbourhood density algorithm else: iterable =", "and searches for them in corpus_context. Will be faster than", "# #the -1 is to account for the fact that", "#multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), #corpus_context.attribute.name, n[1][0]) settable_attr.name, n[1][0]) return results", "max_distance) for w in corpus_context: if stop_check is not None", "file_type == sequence_type: query_with̠td = '.'.join(query) if '.' not in", "call_back('Finding neighbors for {}...'.format(query)) call_back(0,len(corpus_context)) cur = 0 if algorithm", "their own neighbour, and this is incorrect # #subtracting 1", "mutations (not deletions/insertions) Parameters ---------- corpus_context : CorpusContext Context manager", "to look up the key, then look up the object", "for c in sequence[i:]] # insertion yield [str(c) for c", "Returns ------- list The found minimal pairs for the queried", "= collapse_homophones) results[str(w)] = res[1] setattr(w.original, corpus_context.attribute.name, res[0]) # for", "tierdict[last_key_removed].pop(i) break res = find_mutation_minpairs(corpus_context, w, tier_type=tier_type, collapse_homophones = collapse_homophones)", "= None, call_back = None): \"\"\"Find all minimal pairs of", "------- tuple(int, set) Tuple of the number of neighbors and", "supply progress information during the function settable_attr: string Name of", "insertion at len+1 if str(char) not in ['#', sequence[i]]: yield", "to \"\"\" function = partial(neighborhood_density, corpus_context, tierdict = tierdict, tier_type", "density algorithm else: iterable = ((w,) for w in corpus_context)", "== 'edit_distance' and max_distance == 1 and not force_quadratic: return", "= query else: if tier_type.att_type == 'spelling': if file_type ==", "function, num_cores, call_back, stop_check, chunk_size = 1) for n in", "of the words. 
Parameters ---------- corpus_context : CorpusContext Context manager", "a corpus query : Word The word whose neighborhood density", "word based only on segment mutations (not deletions/insertions) Parameters ----------", "function settable_attr: string Name of attribute that neighbourhood density results", "force_quadratic = False, file_type = None, tier_type=None, sequence_type = None,", "words in the corpus and adds them as attributes of", "from corpustools.symbolsim.khorsi import khorsi from corpustools.symbolsim.phono_edit_distance import phono_edit_distance from corpustools.symbolsim.phono_align", "in range(len(sequence)): yield [str(c) for c in sequence[:i]] + [str(c)", "generate_neighbor_candidates(corpus_context, query, sequence_type): if tier_type.att_type == 'tier': cand_str = trans_delimiter.join(candidate)", "= score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size= 1) for n", "consider a word a neighbor force_quadratic : bool Force use", "((w,) for w in corpus_context) neighbors = score_mp(iterable, function, num_cores,", "not None: cur += 1 if cur % 10 ==", "for entry in corpus: corpus_word_with_td = str(getattr(entry, sequence_type)) if query_with̠td", "res[1]] # setattr(w.original, corpus_context.attribute.name, res[0]) else: iterable = ((w,) for", "up the object due to how #multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])),", "return False return edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, max_distance) <=", "= freq_base, sequence_type = corpus_context.sequence_type, max_distance = max_distance) for w", "last_key_removed = str(w_sequence) for i, item in enumerate(tierdict[last_key_removed]): if str(item)", "sequence_type), getattr(query, sequence_type), sequence_type, max_distance) <= max_distance def _is_phono_edit_distance_neighbor(w, query,", "during the function Returns ------- list The found minimal pairs", "= ((w,) for w in corpus_context) neighbors = score_mp(iterable, function,", "contains spelling try: query_word = corpus.corpus.find(query) except KeyError: # if", "up the key, then look up the object due to", "The algorithm used to determine distance max_distance : float, optional", "collapse_homophones and any(getattr(word, sequence_type) == w_sequence for word in neighbors):", "sequence_type), freq_base, sequence_type, max_distance) >= max_distance def neighborhood_density_all_words(corpus_context, tierdict, tier_type", "to how #multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0]) return results", "results def neighborhood_density(corpus_context, query, tierdict, algorithm = 'edit_distance', max_distance =", "segment mutations (not deletions/insertions) Parameters ---------- corpus_context : CorpusContext Context", "= [getattr(r, output_format) for r in res[1]] # setattr(w.original, settable_attr.name,", "that words are counted as their own neighbour, and this", "continue else: #matches.append(str(w_sequence)) matches.append(w) matches = [m.spelling for m in", "set of neighbor Words. 
\"\"\" matches = [] query =", "functools import partial from corpustools.corpus.classes import Word from corpustools.symbolsim.edit_distance import", "<= max_distance def _is_khorsi_neighbor(w, query, freq_base, sequence_type, max_distance): return khorsi(getattr(w,", "sequence_type)) if query_with̠td == corpus_word_with_td: # if a word in", "import phono_edit_distance from corpustools.symbolsim.phono_align import Aligner from corpustools.multiproc import filter_mp,", "found in corpus with the transcription new_query = parse(query, trans_delimiter)", "import khorsi from corpustools.symbolsim.phono_edit_distance import phono_edit_distance from corpustools.symbolsim.phono_align import Aligner", "sequence_type = corpus_context.sequence_type, max_distance = max_distance) for w in corpus_context:", "= algorithm, max_distance = max_distance, collapse_homophones = collapse_homophones) if call_back", "neighbourhood density results will be assigned to \"\"\" function =", "= str(getattr(entry, sequence_type)) if query_with̠td == corpus_word_with_td: # if a", "in neighbors): continue else: neighbors.append(w) return (len(neighbors), neighbors) def generate_neighbor_candidates(corpus_context,", "score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size = 1) for n", "setattr(w.original, settable_attr.name, res[0]) # for w in corpus_context: # if", "= ''.join(candidate) if cand_str in tierdict: for w in tierdict[cand_str]:", "def _is_edit_distance_neighbor(w, query, sequence_type, max_distance): w_len = len(getattr(w, sequence_type)) query_len", "cur = 0 results = dict() last_value_removed = None last_key_removed", "False return edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, max_distance) <= max_distance", "#multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0]) return results def find_mutation_minpairs(corpus_context,", "(not deletions/insertions) Parameters ---------- corpus_context : CorpusContext Context manager for", "word in the corpus is to be referred to. 
#", "query else query for entry in corpus: corpus_word_with_td = str(getattr(entry,", "str The algorithm used to determine distance max_distance : float,", "Context manager for a corpus algorithm : str The algorithm", "neighbors = list() query = ensure_query_is_word(query, corpus_context, sequence_type, tier_type, file_type=file_type)", "dict() last_value_removed = None last_key_removed = None if num_cores ==", "ensure_query_is_word(query, corpus_context, sequence_type, tier_type, file_type=file_type) for candidate in generate_neighbor_candidates(corpus_context, query,", "float, optional Maximum edit distance from the queried word to", "(len(neighbors), neighbors) def fast_neighborhood_density(corpus_context, query, sequence_type, tier_type, tierdict, file_type=None, trans_delimiter='.',", "= None, sequence_type = None, algorithm = 'edit_distance', max_distance =", "tier_type) if call_back is not None: call_back('Finding neighbors...') call_back(0,len(corpus_context)) cur", "= parse(query, trans_delimiter) query_word = Word(**{sequence_type: list(new_query)}) return query_word def", "n[1][0]) return results def find_mutation_minpairs(corpus_context, query, tier_type = None, collapse_homophones", "corpus has the same transcription return entry # that word", "if m[-1][-1]['f'] != 1: continue w_sequence = getattr(w, sequence_type) if", "algorithm = 'edit_distance', max_distance = 1, output_format = 'spelling', num_cores", "corpus_context: if stop_check is not None and stop_check(): return if", "file_type == sequence_type: query_word = Word(**{sequence_type: list(query)}) else: query_word =", "of 1 neighborhoods stop_check : callable, optional Optional function to", "corpus_word_with_td: # if a word in corpus has the same", "corpus. Parameters ---------- corpus_context : CorpusContext Context manager for a", "of the query word based only on segment mutations (not", "= algorithm, max_distance = max_distance, collapse_homophones = collapse_homophones) results[str(w)] =", "where n: number of words in corpus m: length of", "counted as their own neighbour, and this is incorrect #", "query_word = Word(**{sequence_type: list(query)}) else: query_word = query.replace(trans_delimiter, '') query_word", "= set(matches)-set([query]) return (len(neighbors), neighbors) def fast_neighborhood_density(corpus_context, query, sequence_type, tier_type,", "= max_distance) elif algorithm == 'phono_edit_distance': is_neighbor = partial(_is_phono_edit_distance_neighbor, specifier", "corpus_context. 
Will be faster than neighborhood_density when: n > m", "call_back, stop_check, chunk_size = 1) for n in neighbors: #Have", "function = partial(find_mutation_minpairs, corpus_context, tier_type=tier_type, collapse_homophones = collapse_homophones) if call_back", "corpus_context.get_frequency_base() is_neighbor = partial(_is_khorsi_neighbor, freq_base = freq_base, sequence_type = corpus_context.sequence_type,", "-1 or num_cores == 1: for w in corpus_context: if", "= corpus_context.sequence_type query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type) if call_back", "0 results = dict() last_value_removed = None last_key_removed = None", "results def find_mutation_minpairs(corpus_context, query, tier_type = None, collapse_homophones = False,", "max_distance = 1, output_format = 'spelling', num_cores = -1, settable_attr", "_is_khorsi_neighbor(w, query, freq_base, sequence_type, max_distance): return khorsi(getattr(w, sequence_type), getattr(query, sequence_type),", "for c in sequence[i+1:]] # substitution for char in corpus_context.inventory:", "neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size = 1)", "neighbor. stop_check : callable, optional Optional function to check whether", "tier_type = None, sequence_type = None, algorithm = 'edit_distance', max_distance", "corpustools.corpus.classes import Word from corpustools.symbolsim.edit_distance import edit_distance from corpustools.symbolsim.khorsi import", "in sequence] for i in range(len(sequence)): yield [str(c) for c", "max_distance def _is_phono_edit_distance_neighbor(w, query, sequence_type, specifier, max_distance): return phono_edit_distance(getattr(w, sequence_type),", "used to determine distance max_distance : float, optional Maximum edit", "w_sequence = getattr(w, sequence_type) query_sequence = getattr(query, sequence_type) if stop_check", "sequence_type, max_distance): return khorsi(getattr(w, sequence_type), getattr(query, sequence_type), freq_base, sequence_type, max_distance)", "pass to get insertion at len+1 if str(char) not in", "sub_penalty=1) for w in corpus_context: w_sequence = getattr(w, sequence_type) query_sequence", "function Returns ------- list The found minimal pairs for the", "max_distance) >= max_distance def neighborhood_density_all_words(corpus_context, tierdict, tier_type = None, sequence_type", "algorithm else: iterable = ((w,) for w in corpus_context) neighbors", "in corpus_context) neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size=", "10 == 0: call_back(cur) if (len(w_sequence) > len(query_sequence)+1 or len(w_sequence)", "= Word(**{sequence_type: list(query_word)}) elif tier_type.att_type == 'tier': if file_type ==", "a word in corpus has the same transcription return entry", "in matches): continue else: #matches.append(str(w_sequence)) matches.append(w) matches = [m.spelling for", "results[str(w)] = res[1] setattr(w.original, corpus_context.attribute.name, res[0]) # for w in", "getattr(query, sequence_type) if stop_check is not None and stop_check(): return", "in corpus with the transcription new_query = parse(query, trans_delimiter) query_word", "w in tierdict[cand_str]: w_sequence = getattr(w, sequence_type) if collapse_homophones and", "Optional function to supply progress information during the function Returns", "<= max_distance def _is_phono_edit_distance_neighbor(w, query, sequence_type, specifier, max_distance): return phono_edit_distance(getattr(w,", "= res[1]#[str(r) for r in res[1]] # 
setattr(w.original, corpus_context.attribute.name, res[0])", "algorithm = algorithm, max_distance = max_distance, collapse_homophones = collapse_homophones) if", "run if no word found in corpus with the transcription", "= getattr(w, sequence_type) query_sequence = getattr(query, sequence_type) if stop_check is", "return phono_edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, specifier) <= max_distance def", "= partial(_is_khorsi_neighbor, freq_base = freq_base, sequence_type = corpus_context.sequence_type, max_distance =", "particular word in the corpus. Parameters ---------- corpus_context : CorpusContext", "in corpus_context.inventory: if str(char) not in ['#', sequence[i]]: yield [str(c)", "= partial(_is_edit_distance_neighbor, sequence_type = corpus_context.sequence_type, max_distance = max_distance) elif algorithm", "= -1, collapse_homophones = False, stop_check = None, call_back =", "[str(char)] # insertion def find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type = None, num_cores", "the corpus. Parameters ---------- corpus_context : CorpusContext Context manager for", "look up the key, then look up the object due", "from corpustools.multiproc import filter_mp, score_mp def _is_edit_distance_neighbor(w, query, sequence_type, max_distance):", "= None if num_cores == -1 or num_cores == 1:", "edit distance from the queried word to consider a word", "call_back(cur) if (len(w_sequence) > len(query_sequence)+1 or len(w_sequence) < len(query_sequence)-1): continue", "as their own neighbour, and this is incorrect # #subtracting", "settable_attr.name, res[0]-1) # #the -1 is to account for the", "res[0]) else: iterable = ((w,) for w in corpus_context) neighbors", "length of query s: size of segment inventory \"\"\" neighbors", "query_with̠td == corpus_word_with_td: # if a word in corpus has", "Word The word whose minimal pairs to find stop_check :", "w in corpus_context: w_sequence = getattr(w, sequence_type) query_sequence = getattr(query,", "file_type=None, trans_delimiter='.', collapse_homophones = False): \"\"\"Generates all neighbors of edit", "query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type) if call_back is not", "The word whose neighborhood density to calculate. 
algorithm : str", "at len+1 if str(char) not in ['#', sequence[i]]: yield [str(c)", "tier_type, sequence_type = sequence_type, algorithm = algorithm, max_distance = max_distance,", "= None, call_back = None): \"\"\"Calculate the neighborhood density of", "the object due to how #multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), #corpus_context.attribute.name,", "to check whether to gracefully terminate early call_back : callable,", "query : Word The word whose neighborhood density to calculate.", "None: cur += 1 if cur % 10 == 0:", "> len(query_sequence)+1 or len(w_sequence) < len(query_sequence)-1): continue m = al.make_similarity_matrix(query_sequence,", "Optional function to supply progress information during the function settable_attr:", "even when finding edit distance of 1 neighborhoods stop_check :", "c in sequence[i+1:]] # substitution for char in corpus_context.inventory: #", "import edit_distance from corpustools.symbolsim.khorsi import khorsi from corpustools.symbolsim.phono_edit_distance import phono_edit_distance", "word a neighbor force_quadratic : bool Force use of the", "in corpus has the same transcription return entry # that", "for c in sequence[i+1:]] # deletion for char in corpus_context.inventory:", ": callable or None Optional function to supply progress information", "max_distance == 1 and not force_quadratic: return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type,", "m in matches] neighbors = list(set(matches)-set([str(query_sequence)])) return (len(neighbors), neighbors) def", "n[1][0]) return results def neighborhood_density(corpus_context, query, tierdict, algorithm = 'edit_distance',", "corpus_context.sequence_type query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type) if call_back is", "in ['#', sequence[i]]: yield [str(c) for c in sequence[:i]] +", "results[str(w)] = [getattr(r, output_format) for r in res[1]] setattr(w.original, settable_attr.name,", "during the function settable_attr: string Name of attribute that neighbourhood", "collapse_homophones = False, stop_check = None, call_back = None): \"\"\"Calculate", "on segment mutations (not deletions/insertions) Parameters ---------- corpus_context : CorpusContext", "return False if w_len < query_len-max_distance: return False return edit_distance(getattr(w,", "query = ensure_query_is_word(query, corpus_context, sequence_type, tier_type, file_type=file_type) for candidate in", "[str(char)] + [str(c) for c in sequence[i:]] # insertion yield", "w_len = len(getattr(w, sequence_type)) query_len = len(getattr(query, sequence_type)) if w_len", "[str(char)] + [str(c) for c in sequence[i+1:]] # substitution for", "density of all words in the corpus and adds them", "# call_back(cur) # res = function(w) # results[str(w)] = res[1]#[str(r)", "query, freq_base, sequence_type, max_distance): return khorsi(getattr(w, sequence_type), getattr(query, sequence_type), freq_base,", "ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type) if call_back is not None: call_back('Finding", "_is_phono_edit_distance_neighbor(w, query, sequence_type, specifier, max_distance): return phono_edit_distance(getattr(w, sequence_type), getattr(query, sequence_type),", "spelling try: query_word = corpus.corpus.find(query) except KeyError: # if the", "sequence_type, specifier, max_distance): return phono_edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, specifier)", 
"sequence_type, max_distance) <= max_distance def _is_phono_edit_distance_neighbor(w, query, sequence_type, specifier, max_distance):", "= [m.spelling for m in matches] neighbors = list(set(matches)-set([str(query_sequence)])) return", "of query s: size of segment inventory \"\"\" neighbors =", "insertion def find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type = None, num_cores = -1,", "= getattr(w, sequence_type) if collapse_homophones and any(getattr(word, sequence_type) == w_sequence", "corpus_context.sequence_type, max_distance = max_distance) elif algorithm == 'phono_edit_distance': is_neighbor =", "if call_back is not None: cur += 1 if cur", "collapse_homophones = False, force_quadratic = False, file_type = None, tier_type=None,", "tier_type=tier_type, collapse_homophones = collapse_homophones) if call_back is not None: call_back('Calculating", "for w in tierdict[cand_str]: w_sequence = getattr(w, sequence_type) if collapse_homophones", "last_value_removed: tierdict[last_key_removed].append(last_value_removed) w_sequence = getattr(w, corpus_context.sequence_type) last_key_removed = str(w_sequence) for", "sequence_type = sequence_type, algorithm = algorithm, max_distance = max_distance, collapse_homophones", "to find stop_check : callable or None Optional function to", "in the corpus. Parameters ---------- corpus_context : CorpusContext Context manager", "set) Tuple of the number of neighbors and the set", "in the corpus and adds them as attributes of the", "stop_check, chunk_size = 1) for n in neighbors: #Have to", "neighbors = set(matches)-set([query]) return (len(neighbors), neighbors) def fast_neighborhood_density(corpus_context, query, sequence_type,", "# #subtracting 1 here is easier than fixing the neighbourhood", "sequence_type) if stop_check is not None and stop_check(): return if", "max_distance = 1, collapse_homophones = False, force_quadratic = False, file_type", "tierdict, algorithm = 'edit_distance', max_distance = 1, collapse_homophones = False,", "be run if no word found in corpus with the", "gracefully terminate early call_back : callable or None Optional function", "tier_type.att_type == 'tier': if file_type == sequence_type: query_with̠td = '.'.join(query)", ": float, optional Maximum edit distance from the queried word", "None, tier_type=None, sequence_type = None, stop_check = None, call_back =", "found in the corpus new_query = parse(query, trans_delimiter) query_word =", "w, tierdict, tier_type = tier_type, sequence_type = sequence_type, algorithm =", "corpus_context.inventory: # final pass to get insertion at len+1 if", "for a corpus algorithm : str The algorithm used to", "force_quadratic: return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type, tierdict, file_type=file_type, collapse_homophones=collapse_homophones) if", "call_back = None): function = partial(find_mutation_minpairs, corpus_context, tier_type=tier_type, collapse_homophones =", "len(getattr(w, sequence_type)) query_len = len(getattr(query, sequence_type)) if w_len > query_len+max_distance:", "to gracefully terminate early call_back : callable or None Optional", "if tier_type.att_type == 'spelling': if file_type == sequence_type: query_word =", "corpustools.symbolsim.phono_edit_distance import phono_edit_distance from corpustools.symbolsim.phono_align import Aligner from corpustools.multiproc import", "1, collapse_homophones = False, force_quadratic = False, file_type = None,", "searches for them in corpus_context. 
Will be faster than neighborhood_density", "ensure_query_is_word(query, corpus, sequence_type, tier_type, trans_delimiter='.', file_type=None): if isinstance(query, Word): query_word", "sequence_type, max_distance): w_len = len(getattr(w, sequence_type)) query_len = len(getattr(query, sequence_type))", "word in corpus has the same transcription return entry #", "tierdict[last_key_removed].append(last_value_removed) w_sequence = getattr(w, corpus_context.sequence_type) last_key_removed = str(w_sequence) for i,", "the function Returns ------- tuple(int, set) Tuple of the number", ": CorpusContext Context manager for a corpus query : Word", "if file_type == sequence_type: query_word = Word(**{sequence_type: list(query)}) else: query_word", "= 0 results = dict() last_value_removed = None last_key_removed =", "of a particular word in the corpus. Parameters ---------- corpus_context", "sequence_type) yield [str(c) for c in sequence] for i in", "iterable = ((w,) for w in corpus_context) neighbors = score_mp(iterable,", "m[-1][-1]['f'] != 1: continue w_sequence = getattr(w, sequence_type) if collapse_homophones", "w_sequence for m in matches): continue else: #matches.append(str(w_sequence)) matches.append(w) matches", "= -1, settable_attr = None, collapse_homophones = False, stop_check =", "num_cores = -1, settable_attr = None, collapse_homophones = False, stop_check", "query_len+max_distance: return False if w_len < query_len-max_distance: return False return", "return query_word def parse(word, delimiter): return word.split(delimiter) if delimiter in", "[str(c) for c in sequence] for i in range(len(sequence)): yield", "objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), #corpus_context.attribute.name, n[1][0]) settable_attr.name, n[1][0]) return results def neighborhood_density(corpus_context,", "the transcription new_query = parse(query, trans_delimiter) query_word = Word(**{sequence_type: new_query})", "find_mutation_minpairs(corpus_context, w, tier_type=tier_type, collapse_homophones = collapse_homophones) results[str(w)] = res[1] setattr(w.original,", "> query_len+max_distance: return False if w_len < query_len-max_distance: return False", "str(w): last_value_removed = tierdict[last_key_removed].pop(i) break res = neighborhood_density(corpus_context, w, tierdict,", "tierdict[last_key_removed].pop(i) break res = neighborhood_density(corpus_context, w, tierdict, tier_type = tier_type,", "Word(**{sequence_type: list(query)}) else: query_word = query.replace(trans_delimiter, '') query_word = Word(**{sequence_type:", "res[1]] setattr(w.original, settable_attr.name, res[0]) # for w in corpus_context: #", "continue matches.append(w) neighbors = set(matches)-set([query]) return (len(neighbors), neighbors) def fast_neighborhood_density(corpus_context,", "== -1 or num_cores == 1: for w in corpus_context:", "corpus m: length of query s: size of segment inventory", "and stop_check(): return if call_back is not None: cur +=", "query, tier_type = None, collapse_homophones = False, stop_check = None,", "sequence_type): if tier_type.att_type == 'tier': cand_str = trans_delimiter.join(candidate) else: cand_str", "corpus new_query = parse(query, trans_delimiter) query_word = Word(**{sequence_type: list(new_query)}) return", "elif tier_type.att_type == 'tier': if file_type == sequence_type: query_with̠td =", "res[0]-1) # #the -1 is to account for the fact", "terminate early call_back : callable or None Optional function to", "if collapse_homophones and any(getattr(m, sequence_type) == 
w_sequence for m in", "determine distance max_distance : float, optional Maximum edit distance from", "check whether to gracefully terminate early call_back : callable or", "#Have to look up the key, then look up the", "== 'spelling': if file_type == sequence_type: query_word = Word(**{sequence_type: list(query)})", "only on segment mutations (not deletions/insertions) Parameters ---------- corpus_context :", "+= 1 if cur % 10 == 0: call_back(cur) if", "except KeyError: # if the word in the file can't", "'edit_distance', max_distance = 1, output_format = 'spelling', num_cores = -1,", "[str(c) for c in sequence[i+1:]] # deletion for char in", "collapse_homophones = collapse_homophones) results[str(w)] = res[1] setattr(w.original, corpus_context.attribute.name, res[0]) #", "w_sequence for word in neighbors): continue else: neighbors.append(w) return (len(neighbors),", "early call_back : callable or None Optional function to supply", "parse(word, delimiter): return word.split(delimiter) if delimiter in word else list(word)", "word whose minimal pairs to find stop_check : callable or", "is not None and stop_check(): return if last_value_removed: tierdict[last_key_removed].append(last_value_removed) w_sequence", "referred to. # the following should be run if no", "of attribute that neighbourhood density results will be assigned to", "is_neighbor(w, query): continue matches.append(w) neighbors = set(matches)-set([query]) return (len(neighbors), neighbors)", "from the queried word to consider a word a neighbor", "'phono_edit_distance': is_neighbor = partial(_is_phono_edit_distance_neighbor, specifier = corpus_context.specifier, sequence_type = corpus_context.sequence_type,", "corpus: corpus_word_with_td = str(getattr(entry, sequence_type)) if query_with̠td == corpus_word_with_td: #", "stop_check is not None and stop_check(): # return # cur", "max_distance, collapse_homophones = collapse_homophones) results[str(w)] = [getattr(r, output_format) for r", "not in ['#', sequence[i]]: yield [str(c) for c in sequence[:]]", "(len(neighbors), neighbors) def generate_neighbor_candidates(corpus_context, query, sequence_type): sequence = getattr(query, sequence_type)", "in corpus_context: # if stop_check is not None and stop_check():", "pairs for the queried word \"\"\" matches = [] sequence_type", "size of segment inventory \"\"\" neighbors = list() query =", "1: for w in corpus_context: if stop_check is not None", "fast_neighborhood_density(corpus_context, query, sequence_type, tier_type, tierdict, file_type=None, trans_delimiter='.', collapse_homophones = False):", "if file_type == sequence_type: query_with̠td = '.'.join(query) if '.' 
not", "---------- corpus_context : CorpusContext Context manager for a corpus algorithm", "is_neighbor = partial(_is_phono_edit_distance_neighbor, specifier = corpus_context.specifier, sequence_type = corpus_context.sequence_type, max_distance", "corpus_context: # if stop_check is not None and stop_check(): #", "find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type = None, num_cores = -1, collapse_homophones =", "to determine distance max_distance : float, optional Maximum edit distance", "= getattr(query, sequence_type) if stop_check is not None and stop_check():", "for r in res[1]] # setattr(w.original, corpus_context.attribute.name, res[0]) else: iterable", "'spelling': if file_type == sequence_type: query_word = Word(**{sequence_type: list(query)}) else:", "def _is_khorsi_neighbor(w, query, freq_base, sequence_type, max_distance): return khorsi(getattr(w, sequence_type), getattr(query,", "tier_type) if call_back is not None: call_back('Finding neighbors for {}...'.format(query))", "and not force_quadratic: return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type, tierdict, file_type=file_type,", "!= 1: continue w_sequence = getattr(w, sequence_type) if collapse_homophones and", "query : Word The word whose minimal pairs to find", "= al.make_similarity_matrix(query_sequence, w_sequence) if m[-1][-1]['f'] != 1: continue w_sequence =", "sequence_type), getattr(query, sequence_type), sequence_type, specifier) <= max_distance def _is_khorsi_neighbor(w, query,", "from corpustools.symbolsim.phono_edit_distance import phono_edit_distance from corpustools.symbolsim.phono_align import Aligner from corpustools.multiproc", "corpus_context.sequence_type, tier_type) if call_back is not None: call_back('Finding neighbors...') call_back(0,len(corpus_context))", "c in sequence[:i]] + [str(char)] + [str(c) for c in", "of neighbor Words. 
\"\"\" matches = [] query = ensure_query_is_word(query,", "tier_type, tierdict, file_type=file_type, collapse_homophones=collapse_homophones) if algorithm == 'edit_distance': is_neighbor =", "matches): continue else: #matches.append(str(w_sequence)) matches.append(w) matches = [m.spelling for m", "algorithm == 'edit_distance' and max_distance == 1 and not force_quadratic:", "algorithm used to determine distance max_distance : float, optional Maximum", "> m * (1 + s), where n: number of", "is incorrect # #subtracting 1 here is easier than fixing", "here is easier than fixing the neighbourhood density algorithm else:", "call_back(0,len(corpus_context)) cur = 0 if algorithm == 'edit_distance' and max_distance", "None and stop_check(): return if last_value_removed: tierdict[last_key_removed].append(last_value_removed) w_sequence = getattr(w,", "following should be run if no word found in corpus", "== 'tier': if file_type == sequence_type: query_with̠td = '.'.join(query) if", "-1, collapse_homophones = False, stop_check = None, call_back = None):", "specifier) <= max_distance def _is_khorsi_neighbor(w, query, freq_base, sequence_type, max_distance): return", "= corpus_context.sequence_type, max_distance = max_distance) for w in corpus_context: if", ": callable or None Optional function to check whether to", "is not None: call_back('Finding neighbors for {}...'.format(query)) call_back(0,len(corpus_context)) cur =", "= 1, output_format = 'spelling', num_cores = -1, settable_attr =", "sequence_type) query_sequence = getattr(query, sequence_type) if stop_check is not None", "else: #matches.append(str(w_sequence)) matches.append(w) matches = [m.spelling for m in matches]", "of neighbors and the set of neighbor Words. \"\"\" matches", "None): \"\"\"Find all minimal pairs of the query word based", "words in corpus m: length of query s: size of", "(len(w_sequence) > len(query_sequence)+1 or len(w_sequence) < len(query_sequence)-1): continue m =", "= getattr(w, corpus_context.sequence_type) last_key_removed = str(w_sequence) for i, item in", "substitution for char in corpus_context.inventory: # final pass to get", "if w_len > query_len+max_distance: return False if w_len < query_len-max_distance:", "= None last_key_removed = None if num_cores == -1 or", "chunk_size= 1) for n in neighbors: #Have to look up", "if isinstance(query, Word): query_word = query else: if tier_type.att_type ==", "freq_base, sequence_type, max_distance): return khorsi(getattr(w, sequence_type), getattr(query, sequence_type), freq_base, sequence_type,", "the set of neighbor Words. \"\"\" matches = [] query", "settable_attr: string Name of attribute that neighbourhood density results will", "tierdict[cand_str]: w_sequence = getattr(w, sequence_type) if collapse_homophones and any(getattr(word, sequence_type)", "settable_attr = None, collapse_homophones = False, stop_check = None, call_back", "the query word based only on segment mutations (not deletions/insertions)", "sequence_type): sequence = getattr(query, sequence_type) yield [str(c) for c in", "freq_base = freq_base, sequence_type = corpus_context.sequence_type, max_distance = max_distance) for", "= getattr(query, sequence_type) yield [str(c) for c in sequence] for", "neighbor Words. 
\"\"\" matches = [] query = ensure_query_is_word(query, corpus_context,", "list(query_word)}) elif tier_type.att_type == 'tier': if file_type == sequence_type: query_with̠td", "a neighbor force_quadratic : bool Force use of the less", "corpus, sequence_type, tier_type, trans_delimiter='.', file_type=None): if isinstance(query, Word): query_word =", "the fact that words are counted as their own neighbour,", "# cur += 1 # call_back(cur) # res = function(w)", "return edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, max_distance) <= max_distance def", "if stop_check is not None and stop_check(): # return #", "= Word(**{sequence_type: list(query)}) else: query_word = query.replace(trans_delimiter, '') query_word =", "a particular word in the corpus. Parameters ---------- corpus_context :", "\"\"\"Calculate the neighborhood density of a particular word in the", "algorithm == 'phono_edit_distance': is_neighbor = partial(_is_phono_edit_distance_neighbor, specifier = corpus_context.specifier, sequence_type", "edit distance <= 1 and searches for them in corpus_context.", "str(w_sequence) for i, item in enumerate(tierdict[last_key_removed]): if str(item) == str(w):", "transcription new_query = parse(query, trans_delimiter) query_word = Word(**{sequence_type: new_query}) else:", "= getattr(w, sequence_type) if collapse_homophones and any(getattr(m, sequence_type) == w_sequence", "def fast_neighborhood_density(corpus_context, query, sequence_type, tier_type, tierdict, file_type=None, trans_delimiter='.', collapse_homophones =", "segment inventory \"\"\" neighbors = list() query = ensure_query_is_word(query, corpus_context,", "use of the less efficient quadratic algorithm even when finding", "= corpus_context.get_frequency_base() is_neighbor = partial(_is_khorsi_neighbor, freq_base = freq_base, sequence_type =", "tier_type.att_type == 'tier': cand_str = trans_delimiter.join(candidate) else: cand_str = ''.join(candidate)", "corpus_context.sequence_type, tier_type, tierdict, file_type=file_type, collapse_homophones=collapse_homophones) if algorithm == 'edit_distance': is_neighbor", "= ensure_query_is_word(query, corpus_context, sequence_type, tier_type, file_type=file_type) for candidate in generate_neighbor_candidates(corpus_context,", "neighbors of edit distance <= 1 and searches for them", "== sequence_type: query_with̠td = '.'.join(query) if '.' not in query", "for them in corpus_context. 
Will be faster than neighborhood_density when:", "callable, optional Optional function to supply progress information during the", "query, sequence_type): sequence = getattr(query, sequence_type) yield [str(c) for c", "freq_base = corpus_context.get_frequency_base() is_neighbor = partial(_is_khorsi_neighbor, freq_base = freq_base, sequence_type", "stop_check = None, call_back = None): \"\"\"Find all minimal pairs", "corpus with the transcription new_query = parse(query, trans_delimiter) query_word =", "output_format) for r in res[1]] setattr(w.original, settable_attr.name, res[0]) # for", "query_len-max_distance: return False return edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, max_distance)", "if stop_check is not None and stop_check(): return if call_back", "sequence[i]]: yield [str(c) for c in sequence[:i]] + [str(char)] +", "filter_mp, score_mp def _is_edit_distance_neighbor(w, query, sequence_type, max_distance): w_len = len(getattr(w,", "matches.append(w) neighbors = set(matches)-set([query]) return (len(neighbors), neighbors) def fast_neighborhood_density(corpus_context, query,", "= tierdict[last_key_removed].pop(i) break res = find_mutation_minpairs(corpus_context, w, tier_type=tier_type, collapse_homophones =", "query, corpus_context.sequence_type, tier_type, tierdict, file_type=file_type, collapse_homophones=collapse_homophones) if algorithm == 'edit_distance':", "# res = function(w) # results[str(w)] = res[1]#[str(r) for r", "= res[1] setattr(w.original, corpus_context.attribute.name, res[0]) # for w in corpus_context:", "corpus_context : CorpusContext Context manager for a corpus query :", "function, num_cores, call_back, stop_check, chunk_size= 1) for n in neighbors:", "cur % 10 == 0: call_back(cur) if not is_neighbor(w, query):", "res = find_mutation_minpairs(corpus_context, w, tier_type=tier_type, collapse_homophones = collapse_homophones) results[str(w)] =", "return (len(neighbors), neighbors) def fast_neighborhood_density(corpus_context, query, sequence_type, tier_type, tierdict, file_type=None,", "inventory \"\"\" neighbors = list() query = ensure_query_is_word(query, corpus_context, sequence_type,", "#the -1 is to account for the fact that words", "+ [str(char)] + [str(c) for c in sequence[i+1:]] # substitution", "+= 1 # call_back(cur) # res = function(w) # results[str(w)]", "enumerate(tierdict[last_key_removed]): if str(item) == str(w): last_value_removed = tierdict[last_key_removed].pop(i) break res", "break res = neighborhood_density(corpus_context, w, tierdict, tier_type = tier_type, sequence_type", "= len(getattr(w, sequence_type)) query_len = len(getattr(query, sequence_type)) if w_len >", "supply progress information during the function Returns ------- tuple(int, set)", "import filter_mp, score_mp def _is_edit_distance_neighbor(w, query, sequence_type, max_distance): w_len =", "The found minimal pairs for the queried word \"\"\" matches", "max_distance def _is_khorsi_neighbor(w, query, freq_base, sequence_type, max_distance): return khorsi(getattr(w, sequence_type),", "== 'edit_distance': is_neighbor = partial(_is_edit_distance_neighbor, sequence_type = corpus_context.sequence_type, max_distance =", "the queried word to consider a word a neighbor force_quadratic", "from corpustools.corpus.classes import Word from corpustools.symbolsim.edit_distance import edit_distance from corpustools.symbolsim.khorsi", "query word based only on segment mutations (not deletions/insertions) Parameters", "tier_type=tier_type, 
collapse_homophones = collapse_homophones) results[str(w)] = res[1] setattr(w.original, corpus_context.attribute.name, res[0])", "sequence[:i]] + [str(char)] + [str(c) for c in sequence[i:]] #", "word \"\"\" matches = [] sequence_type = corpus_context.sequence_type query =", "neighborhood_density_all_words(corpus_context, tierdict, tier_type = None, sequence_type = None, algorithm =", "------- list The found minimal pairs for the queried word", "word to consider a word a neighbor force_quadratic : bool", "terminate early call_back : callable, optional Optional function to supply", "insertion yield [str(c) for c in sequence[:i]] + [str(char)] +", "Name of attribute that neighbourhood density results will be assigned", "if '.' not in query else query for entry in", ": Word The word whose minimal pairs to find stop_check", "to. # the following should be run if no word", "num_cores == 1: for w in corpus_context: if stop_check is", "density results will be assigned to \"\"\" function = partial(neighborhood_density,", "Parameters ---------- corpus_context : CorpusContext Context manager for a corpus", "how #multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), #corpus_context.attribute.name, n[1][0]) settable_attr.name, n[1][0]) return", "same transcription return entry # that word in the corpus", "for candidate in generate_neighbor_candidates(corpus_context, query, sequence_type): if tier_type.att_type == 'tier':", ": str The algorithm used to determine distance max_distance :", "gracefully terminate early call_back : callable, optional Optional function to", "from corpustools.symbolsim.phono_align import Aligner from corpustools.multiproc import filter_mp, score_mp def", "None): \"\"\"Calculate the neighborhood density of all words in the", "information during the function settable_attr: string Name of attribute that", "= None): \"\"\"Find all minimal pairs of the query word", "None, call_back = None): function = partial(find_mutation_minpairs, corpus_context, tier_type=tier_type, collapse_homophones", "w_sequence = getattr(w, sequence_type) if collapse_homophones and any(getattr(word, sequence_type) ==", "neighbors = list(set(matches)-set([str(query_sequence)])) return (len(neighbors), neighbors) def ensure_query_is_word(query, corpus, sequence_type,", "< len(query_sequence)-1): continue m = al.make_similarity_matrix(query_sequence, w_sequence) if m[-1][-1]['f'] !=", "w in corpus_context) neighbors = score_mp(iterable, function, num_cores, call_back, stop_check,", "len(w_sequence) < len(query_sequence)-1): continue m = al.make_similarity_matrix(query_sequence, w_sequence) if m[-1][-1]['f']", "entry # that word in the corpus is to be", "quadratic algorithm even when finding edit distance of 1 neighborhoods", "objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0]) return results def find_mutation_minpairs(corpus_context, query, tier_type", "neighbor force_quadratic : bool Force use of the less efficient", "# insertion yield [str(c) for c in sequence[:i]] + [str(char)]", "cand_str = trans_delimiter.join(candidate) else: cand_str = ''.join(candidate) if cand_str in", "cand_str = ''.join(candidate) if cand_str in tierdict: for w in", "number of words in corpus m: length of query s:", "for char in corpus_context.inventory: # final pass to get insertion", "Tuple of the number of neighbors and the set of", "them in corpus_context. 
Will be faster than neighborhood_density when: n", "= Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1) for w in corpus_context: w_sequence", "else: query_word = query.replace(trans_delimiter, '') query_word = Word(**{sequence_type: list(query_word)}) elif", "not in query else query for entry in corpus: corpus_word_with_td", "list(new_query)}) return query_word def parse(word, delimiter): return word.split(delimiter) if delimiter", "fixing the neighbourhood density algorithm else: iterable = ((w,) for", "whether to gracefully terminate early call_back : callable or None", "in generate_neighbor_candidates(corpus_context, query, sequence_type): if tier_type.att_type == 'tier': cand_str =", "candidate in generate_neighbor_candidates(corpus_context, query, sequence_type): if tier_type.att_type == 'tier': cand_str", "neighbors: #Have to look up the key, then look up", "call_back is not None: call_back('Finding neighbors...') call_back(0,len(corpus_context)) cur = 0", "new_query = parse(query, trans_delimiter) query_word = Word(**{sequence_type: list(new_query)}) return query_word", "# substitution for char in corpus_context.inventory: # final pass to", "when: n > m * (1 + s), where n:", "str(item) == str(w): last_value_removed = tierdict[last_key_removed].pop(i) break res = neighborhood_density(corpus_context,", "query, sequence_type): if tier_type.att_type == 'tier': cand_str = trans_delimiter.join(candidate) else:", "sequence[:]] + [str(char)] # insertion def find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type =", "sequence_type, tier_type, tierdict, file_type=None, trans_delimiter='.', collapse_homophones = False): \"\"\"Generates all", "if collapse_homophones and any(getattr(word, sequence_type) == w_sequence for word in", "None, sequence_type = None, algorithm = 'edit_distance', max_distance = 1,", "False, stop_check = None, call_back = None): function = partial(find_mutation_minpairs,", "= list() query = ensure_query_is_word(query, corpus_context, sequence_type, tier_type, file_type=file_type) for", "'edit_distance', max_distance = 1, collapse_homophones = False, force_quadratic = False,", "== corpus_word_with_td: # if a word in corpus has the", "is easier than fixing the neighbourhood density algorithm else: iterable", "final pass to get insertion at len+1 if str(char) not", "getattr(w, sequence_type) query_sequence = getattr(query, sequence_type) if stop_check is not", "sequence_type, algorithm = algorithm, max_distance = max_distance, collapse_homophones = collapse_homophones)", "w in corpus_context: # if stop_check is not None and", "results = dict() last_value_removed = None last_key_removed = None if", "sequence_type = corpus_context.sequence_type, max_distance = max_distance) elif algorithm == 'phono_edit_distance':", "# insertion def find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type = None, num_cores =", "1 and searches for them in corpus_context. 
Will be faster", "word in neighbors): continue else: neighbors.append(w) return (len(neighbors), neighbors) def", "cur += 1 # call_back(cur) # res = function(w) #", "sequence_type = corpus_context.sequence_type, max_distance = max_distance) elif algorithm == 'khorsi':", "tierdict, file_type=None, trans_delimiter='.', collapse_homophones = False): \"\"\"Generates all neighbors of", "return (len(neighbors), neighbors) def generate_neighbor_candidates(corpus_context, query, sequence_type): sequence = getattr(query,", "function = partial(neighborhood_density, corpus_context, tierdict = tierdict, tier_type = tier_type,", "False): \"\"\"Generates all neighbors of edit distance <= 1 and", "w in corpus_context: if stop_check is not None and stop_check():", "#corpus_context.attribute.name, n[1][0]) settable_attr.name, n[1][0]) return results def neighborhood_density(corpus_context, query, tierdict,", "if cur % 10 == 0: call_back(cur) if not is_neighbor(w,", "object due to how #multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0])", "# deletion for char in corpus_context.inventory: if str(char) not in", "tier_type=None, sequence_type = None, stop_check = None, call_back = None):", "matches] neighbors = list(set(matches)-set([str(query_sequence)])) return (len(neighbors), neighbors) def ensure_query_is_word(query, corpus,", "res[1] setattr(w.original, corpus_context.attribute.name, res[0]) # for w in corpus_context: #", "partial(_is_edit_distance_neighbor, sequence_type = corpus_context.sequence_type, max_distance = max_distance) elif algorithm ==", "query): continue matches.append(w) neighbors = set(matches)-set([query]) return (len(neighbors), neighbors) def", "in sequence[i+1:]] # deletion for char in corpus_context.inventory: if str(char)", "False, force_quadratic = False, file_type = None, tier_type=None, sequence_type =", "\"\"\" neighbors = list() query = ensure_query_is_word(query, corpus_context, sequence_type, tier_type,", "None, call_back = None): \"\"\"Calculate the neighborhood density of a", "stop_check = None, call_back = None): \"\"\"Calculate the neighborhood density", "query_word = query.replace(trans_delimiter, '') query_word = Word(**{sequence_type: list(query_word)}) elif tier_type.att_type", "corpus_context.attribute.name, res[0]) else: iterable = ((w,) for w in corpus_context)", "check whether to gracefully terminate early call_back : callable, optional", "sequence_type) == w_sequence for word in neighbors): continue else: neighbors.append(w)", "m = al.make_similarity_matrix(query_sequence, w_sequence) if m[-1][-1]['f'] != 1: continue w_sequence", "not None and stop_check(): return if call_back is not None:", "corpus.corpus.find(query) except KeyError: # if the word in the file", "efficient quadratic algorithm even when finding edit distance of 1", "tier_type, tierdict, file_type=None, trans_delimiter='.', collapse_homophones = False): \"\"\"Generates all neighbors", "= str(w_sequence) for i, item in enumerate(tierdict[last_key_removed]): if str(item) ==", "str(item) == str(w): last_value_removed = tierdict[last_key_removed].pop(i) break res = find_mutation_minpairs(corpus_context,", "stop_check(): # return # cur += 1 # call_back(cur) #", "# setattr(w.original, settable_attr.name, res[0]-1) # #the -1 is to account", "early call_back : callable, optional Optional function to supply progress", "cur += 1 if cur % 10 == 0: call_back(cur)", "settable_attr.name, res[0]) # for w in corpus_context: # 
if stop_check", "== 1: for w in corpus_context: if stop_check is not", "if (len(w_sequence) > len(query_sequence)+1 or len(w_sequence) < len(query_sequence)-1): continue m", "max_distance = max_distance, collapse_homophones = collapse_homophones) results[str(w)] = [getattr(r, output_format)", "Returns ------- tuple(int, set) Tuple of the number of neighbors", ": callable, optional Optional function to supply progress information during", "= tierdict, tier_type = tier_type, sequence_type = sequence_type, algorithm =", "trans_delimiter) query_word = Word(**{sequence_type: new_query}) else: # if file contains", "1 here is easier than fixing the neighbourhood density algorithm", "= [] sequence_type = corpus_context.sequence_type query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type,", "c in sequence[i:]] # insertion yield [str(c) for c in", "= tier_type, sequence_type = sequence_type, algorithm = algorithm, max_distance =", "i in range(len(sequence)): yield [str(c) for c in sequence[:i]] +", "tierdict: for w in tierdict[cand_str]: w_sequence = getattr(w, sequence_type) if", "stop_check = None, call_back = None): function = partial(find_mutation_minpairs, corpus_context,", "corpustools.symbolsim.phono_align import Aligner from corpustools.multiproc import filter_mp, score_mp def _is_edit_distance_neighbor(w,", "for char in corpus_context.inventory: if str(char) not in ['#', sequence[i]]:", "if not is_neighbor(w, query): continue matches.append(w) neighbors = set(matches)-set([query]) return", "list(query)}) else: query_word = query.replace(trans_delimiter, '') query_word = Word(**{sequence_type: list(query_word)})", "corpus_context.attribute.name, n[1][0]) return results def find_mutation_minpairs(corpus_context, query, tier_type = None,", "pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), #corpus_context.attribute.name, n[1][0]) settable_attr.name, n[1][0]) return results def", "not is_neighbor(w, query): continue matches.append(w) neighbors = set(matches)-set([query]) return (len(neighbors),", "parse(query, trans_delimiter) query_word = Word(**{sequence_type: new_query}) else: # if file", "manager for a corpus query : Word The word whose", "[str(c) for c in sequence[i:]] # insertion yield [str(c) for", "of segment inventory \"\"\" neighbors = list() query = ensure_query_is_word(query,", "supply progress information during the function Returns ------- list The", "10 == 0: call_back(cur) if not is_neighbor(w, query): continue matches.append(w)", "\"\"\"Generates all neighbors of edit distance <= 1 and searches", "tier_type.att_type == 'spelling': if file_type == sequence_type: query_word = Word(**{sequence_type:", "function Returns ------- tuple(int, set) Tuple of the number of", "calculate. 
algorithm : str The algorithm used to determine distance", "the function Returns ------- list The found minimal pairs for", "in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i+1:]]", "elif algorithm == 'phono_edit_distance': is_neighbor = partial(_is_phono_edit_distance_neighbor, specifier = corpus_context.specifier,", "= max_distance, collapse_homophones = collapse_homophones) if call_back is not None:", "string Name of attribute that neighbourhood density results will be", "if the word in the file can't be found in", "matches = [] sequence_type = corpus_context.sequence_type query = ensure_query_is_word(query, corpus_context,", "Context manager for a corpus query : Word The word", "account for the fact that words are counted as their", "and stop_check(): return if last_value_removed: tierdict[last_key_removed].append(last_value_removed) w_sequence = getattr(w, corpus_context.sequence_type)", "max_distance = max_distance) for w in corpus_context: if stop_check is", "#matches.append(str(w_sequence)) matches.append(w) matches = [m.spelling for m in matches] neighbors", "None last_key_removed = None if num_cores == -1 or num_cores", "queried word to consider a word a neighbor force_quadratic :", "the words. Parameters ---------- corpus_context : CorpusContext Context manager for", "tuple(int, set) Tuple of the number of neighbors and the", "for c in sequence[:i]] + [str(char)] + [str(c) for c", "r in res[1]] setattr(w.original, settable_attr.name, res[0]) # for w in", "with the transcription new_query = parse(query, trans_delimiter) query_word = Word(**{sequence_type:", "phono_edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, specifier) <= max_distance def _is_khorsi_neighbor(w,", "than fixing the neighbourhood density algorithm else: iterable = ((w,)", "densities...') call_back(0,len(corpus_context)) cur = 0 results = dict() last_value_removed =", "results will be assigned to \"\"\" function = partial(neighborhood_density, corpus_context,", "last_value_removed = tierdict[last_key_removed].pop(i) break res = find_mutation_minpairs(corpus_context, w, tier_type=tier_type, collapse_homophones", "call_back : callable, optional Optional function to supply progress information", "collapse_homophones) results[str(w)] = [getattr(r, output_format) for r in res[1]] setattr(w.original,", "tier_type = None, num_cores = -1, collapse_homophones = False, stop_check", "cur % 10 == 0: call_back(cur) if (len(w_sequence) > len(query_sequence)+1", "== 'tier': cand_str = trans_delimiter.join(candidate) else: cand_str = ''.join(candidate) if", "word to consider a word a neighbor. stop_check : callable,", "algorithm == 'khorsi': freq_base = corpus_context.get_frequency_base() is_neighbor = partial(_is_khorsi_neighbor, freq_base", "corpus_context, corpus_context.sequence_type, tier_type) if call_back is not None: call_back('Finding neighbors...')", "char in corpus_context.inventory: # final pass to get insertion at", "is not None and stop_check(): # return # cur +=", "a neighbor. 
stop_check : callable, optional Optional function to check", "= partial(_is_phono_edit_distance_neighbor, specifier = corpus_context.specifier, sequence_type = corpus_context.sequence_type, max_distance =", "[m.spelling for m in matches] neighbors = list(set(matches)-set([str(query_sequence)])) return (len(neighbors),", "khorsi from corpustools.symbolsim.phono_edit_distance import phono_edit_distance from corpustools.symbolsim.phono_align import Aligner from", "collapse_homophones = False, stop_check = None, call_back = None): function", "False, stop_check = None, call_back = None): \"\"\"Calculate the neighborhood", "get insertion at len+1 if str(char) not in ['#', sequence[i]]:", "for i, item in enumerate(tierdict[last_key_removed]): if str(item) == str(w): last_value_removed", "in sequence[:]] + [str(char)] # insertion def find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type", "if str(char) not in ['#', sequence[i]]: yield [str(c) for c", "# return # cur += 1 # call_back(cur) # res", "callable or None Optional function to supply progress information during", "for a corpus query : Word The word whose neighborhood", "any(getattr(word, sequence_type) == w_sequence for word in neighbors): continue else:", "neighbors and the set of neighbor Words. \"\"\" matches =", "Word(**{sequence_type: list(new_query)}) return query_word def parse(word, delimiter): return word.split(delimiter) if", "call_back(0,len(corpus_context)) cur = 0 al = Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1)", "Will be faster than neighborhood_density when: n > m *", ": callable, optional Optional function to check whether to gracefully", "= None, tier_type=None, sequence_type = None, stop_check = None, call_back", "neighbors) def fast_neighborhood_density(corpus_context, query, sequence_type, tier_type, tierdict, file_type=None, trans_delimiter='.', collapse_homophones", "{}...'.format(query)) call_back(0,len(corpus_context)) cur = 0 if algorithm == 'edit_distance' and", "khorsi(getattr(w, sequence_type), getattr(query, sequence_type), freq_base, sequence_type, max_distance) >= max_distance def", "[] sequence_type = corpus_context.sequence_type query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)", "corpus_context, sequence_type, tier_type, file_type=file_type) for candidate in generate_neighbor_candidates(corpus_context, query, sequence_type):", "+ [str(char)] + [str(c) for c in sequence[i:]] # insertion", "are counted as their own neighbour, and this is incorrect", "partial from corpustools.corpus.classes import Word from corpustools.symbolsim.edit_distance import edit_distance from", "= 1, collapse_homophones = False, force_quadratic = False, file_type =", "# final pass to get insertion at len+1 if str(char)", "assigned to \"\"\" function = partial(neighborhood_density, corpus_context, tierdict = tierdict,", "neighborhood_density when: n > m * (1 + s), where", "= function(w) # results[str(w)] = [getattr(r, output_format) for r in", "= dict() last_value_removed = None last_key_removed = None if num_cores", "n in neighbors: #Have to look up the key, then", "res[0]) # for w in corpus_context: # if stop_check is", "the function settable_attr: string Name of attribute that neighbourhood density", "len+1 if str(char) not in ['#', sequence[i]]: yield [str(c) for", "1, output_format = 'spelling', num_cores = -1, settable_attr = None,", "optional Optional function to check whether to gracefully terminate early", "None, 
algorithm = 'edit_distance', max_distance = 1, output_format = 'spelling',", "or len(w_sequence) < len(query_sequence)-1): continue m = al.make_similarity_matrix(query_sequence, w_sequence) if", "m * (1 + s), where n: number of words", "call_back : callable or None Optional function to supply progress", "corpus query : Word The word whose neighborhood density to", "= 'edit_distance', max_distance = 1, output_format = 'spelling', num_cores =", "deletion for char in corpus_context.inventory: if str(char) not in ['#',", "and the set of neighbor Words. \"\"\" matches = []", "sequence_type, max_distance) >= max_distance def neighborhood_density_all_words(corpus_context, tierdict, tier_type = None,", "= collapse_homophones) results[str(w)] = [getattr(r, output_format) for r in res[1]]", "neighbourhood density algorithm else: iterable = ((w,) for w in", "len(query_sequence)+1 or len(w_sequence) < len(query_sequence)-1): continue m = al.make_similarity_matrix(query_sequence, w_sequence)", "= None, algorithm = 'edit_distance', max_distance = 1, output_format =", "sequence_type, tier_type, trans_delimiter='.', file_type=None): if isinstance(query, Word): query_word = query", "progress information during the function settable_attr: string Name of attribute", "corpus_context: w_sequence = getattr(w, sequence_type) query_sequence = getattr(query, sequence_type) if", "or num_cores == 1: for w in corpus_context: if stop_check", "neighborhood density to calculate. algorithm : str The algorithm used", "# for w in corpus_context: # if stop_check is not", "for c in sequence[:i]] + [str(c) for c in sequence[i+1:]]", "+ [str(char)] # insertion def find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type = None,", "yield [str(c) for c in sequence[:]] + [str(char)] # insertion", "file contains spelling try: query_word = corpus.corpus.find(query) except KeyError: #", "the corpus is to be referred to. # the following", "num_cores = -1, collapse_homophones = False, stop_check = None, call_back", "corpus query : Word The word whose minimal pairs to", "Force use of the less efficient quadratic algorithm even when", "elif algorithm == 'khorsi': freq_base = corpus_context.get_frequency_base() is_neighbor = partial(_is_khorsi_neighbor,", "call_back is not None: call_back('Calculating neighborhood densities...') call_back(0,len(corpus_context)) cur =", "in the corpus is to be referred to. 
# the", "is not None: cur += 1 if cur % 10", "last_value_removed = tierdict[last_key_removed].pop(i) break res = neighborhood_density(corpus_context, w, tierdict, tier_type", "None Optional function to check whether to gracefully terminate early", "[] query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type) if call_back is", "corpus_context) neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size =", "% 10 == 0: call_back(cur) if (len(w_sequence) > len(query_sequence)+1 or", "for w in corpus_context: if stop_check is not None and", "call_back = None): \"\"\"Calculate the neighborhood density of all words", "results[str(w)] = res[1]#[str(r) for r in res[1]] # setattr(w.original, corpus_context.attribute.name,", "neighborhoods stop_check : callable, optional Optional function to check whether", "pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0]) return results def find_mutation_minpairs(corpus_context, query,", "neighbors) def ensure_query_is_word(query, corpus, sequence_type, tier_type, trans_delimiter='.', file_type=None): if isinstance(query,", "attributes of the words. Parameters ---------- corpus_context : CorpusContext Context", "= 'edit_distance', max_distance = 1, collapse_homophones = False, force_quadratic =", "== 0: call_back(cur) if not is_neighbor(w, query): continue matches.append(w) neighbors", "neighborhood density of a particular word in the corpus. Parameters", "CorpusContext Context manager for a corpus query : Word The", "al.make_similarity_matrix(query_sequence, w_sequence) if m[-1][-1]['f'] != 1: continue w_sequence = getattr(w,", "= parse(query, trans_delimiter) query_word = Word(**{sequence_type: new_query}) else: # if", "max_distance): return khorsi(getattr(w, sequence_type), getattr(query, sequence_type), freq_base, sequence_type, max_distance) >=", "#subtracting 1 here is easier than fixing the neighbourhood density", "% 10 == 0: call_back(cur) if not is_neighbor(w, query): continue", "char in corpus_context.inventory: if str(char) not in ['#', sequence[i]]: yield", "distance of 1 neighborhoods stop_check : callable, optional Optional function", "found minimal pairs for the queried word \"\"\" matches =", "c in sequence[:]] + [str(char)] # insertion def find_mutation_minpairs_all_words(corpus_context, tierdict,", "= None, stop_check = None, call_back = None): \"\"\"Calculate the", "yield [str(c) for c in sequence[:i]] + [str(c) for c", "sequence_type)) query_len = len(getattr(query, sequence_type)) if w_len > query_len+max_distance: return", "w_len > query_len+max_distance: return False if w_len < query_len-max_distance: return", "sequence_type), getattr(query, sequence_type), freq_base, sequence_type, max_distance) >= max_distance def neighborhood_density_all_words(corpus_context,", "specifier, max_distance): return phono_edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, specifier) <=", "the neighborhood density of all words in the corpus and", "neighborhood_density(corpus_context, w, tierdict, tier_type = tier_type, sequence_type = sequence_type, algorithm", "not force_quadratic: return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type, tierdict, file_type=file_type, collapse_homophones=collapse_homophones)", "and adds them as attributes of the words. 
Parameters ----------", "# results[str(w)] = res[1]#[str(r) for r in res[1]] # setattr(w.original,", "= 'spelling', num_cores = -1, settable_attr = None, collapse_homophones =", "return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type, tierdict, file_type=file_type, collapse_homophones=collapse_homophones) if algorithm", "0: call_back(cur) if not is_neighbor(w, query): continue matches.append(w) neighbors =", "sequence_type: query_word = Word(**{sequence_type: list(query)}) else: query_word = query.replace(trans_delimiter, '')", "max_distance : float, optional Maximum edit distance from the queried", "range(len(sequence)): yield [str(c) for c in sequence[:i]] + [str(c) for", "if call_back is not None: call_back('Calculating neighborhood densities...') call_back(0,len(corpus_context)) cur", "= 0 if algorithm == 'edit_distance' and max_distance == 1", "if a word in corpus has the same transcription return", "in sequence[i:]] # insertion yield [str(c) for c in sequence[:i]]", "due to how #multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), #corpus_context.attribute.name, n[1][0]) settable_attr.name,", "res = neighborhood_density(corpus_context, w, tierdict, tier_type = tier_type, sequence_type =", "= query.replace(trans_delimiter, '') query_word = Word(**{sequence_type: list(query_word)}) elif tier_type.att_type ==", "for i in range(len(sequence)): yield [str(c) for c in sequence[:i]]", "if last_value_removed: tierdict[last_key_removed].append(last_value_removed) w_sequence = getattr(w, corpus_context.sequence_type) last_key_removed = str(w_sequence)", "query for entry in corpus: corpus_word_with_td = str(getattr(entry, sequence_type)) if", "if tier_type.att_type == 'tier': cand_str = trans_delimiter.join(candidate) else: cand_str =", "max_distance): w_len = len(getattr(w, sequence_type)) query_len = len(getattr(query, sequence_type)) if", "def _is_phono_edit_distance_neighbor(w, query, sequence_type, specifier, max_distance): return phono_edit_distance(getattr(w, sequence_type), getattr(query,", "fact that words are counted as their own neighbour, and", "w_len < query_len-max_distance: return False return edit_distance(getattr(w, sequence_type), getattr(query, sequence_type),", "None: call_back('Finding neighbors for {}...'.format(query)) call_back(0,len(corpus_context)) cur = 0 if", "(len(neighbors), neighbors) def ensure_query_is_word(query, corpus, sequence_type, tier_type, trans_delimiter='.', file_type=None): if", "r in res[1]] # setattr(w.original, corpus_context.attribute.name, res[0]) else: iterable =", "[getattr(r, output_format) for r in res[1]] setattr(w.original, settable_attr.name, res[0]) #", "function(w) # results[str(w)] = [getattr(r, output_format) for r in res[1]]", "sequence[i+1:]] # deletion for char in corpus_context.inventory: if str(char) not", "that neighbourhood density results will be assigned to \"\"\" function", "== w_sequence for word in neighbors): continue else: neighbors.append(w) return", "the key, then look up the object due to how", "in query else query for entry in corpus: corpus_word_with_td =", "yield [str(c) for c in sequence] for i in range(len(sequence)):", "= corpus.corpus.find(query) except KeyError: # if the word in the", "isinstance(query, Word): query_word = query else: if tier_type.att_type == 'spelling':", "should be run if no word found in corpus with", "if query_with̠td == corpus_word_with_td: # if a word in corpus", "* (1 + s), where n: number of words 
in", "None Optional function to supply progress information during the function", "1 # call_back(cur) # res = function(w) # results[str(w)] =", "fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type, tierdict, file_type=file_type, collapse_homophones=collapse_homophones) if algorithm ==", "information during the function Returns ------- tuple(int, set) Tuple of", "call_back = None): \"\"\"Calculate the neighborhood density of a particular", "in res[1]] # setattr(w.original, corpus_context.attribute.name, res[0]) else: iterable = ((w,)", "query s: size of segment inventory \"\"\" neighbors = list()", "sequence_type) if collapse_homophones and any(getattr(word, sequence_type) == w_sequence for word", "return (len(neighbors), neighbors) def ensure_query_is_word(query, corpus, sequence_type, tier_type, trans_delimiter='.', file_type=None):", "getattr(query, sequence_type) yield [str(c) for c in sequence] for i", "['#', sequence[i]]: yield [str(c) for c in sequence[:]] + [str(char)]", "getattr(w, corpus_context.sequence_type) last_key_removed = str(w_sequence) for i, item in enumerate(tierdict[last_key_removed]):", "trans_delimiter='.', collapse_homophones = False): \"\"\"Generates all neighbors of edit distance", "call_back('Finding neighbors...') call_back(0,len(corpus_context)) cur = 0 al = Aligner(features_tf=False, ins_penalty=float('inf'),", "stop_check(): return if last_value_removed: tierdict[last_key_removed].append(last_value_removed) w_sequence = getattr(w, corpus_context.sequence_type) last_key_removed", "edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, max_distance) <= max_distance def _is_phono_edit_distance_neighbor(w,", "None, stop_check = None, call_back = None): \"\"\"Calculate the neighborhood", "from corpustools.symbolsim.edit_distance import edit_distance from corpustools.symbolsim.khorsi import khorsi from corpustools.symbolsim.phono_edit_distance", "trans_delimiter='.', file_type=None): if isinstance(query, Word): query_word = query else: if", "None): \"\"\"Calculate the neighborhood density of a particular word in", "= corpus_context.specifier, sequence_type = corpus_context.sequence_type, max_distance = max_distance) elif algorithm", "[str(c) for c in sequence[:i]] + [str(char)] + [str(c) for", "whose neighborhood density to calculate. algorithm : str The algorithm", "= 0 al = Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1) for w", "for m in matches): continue else: #matches.append(str(w_sequence)) matches.append(w) matches =", "last_key_removed = None if num_cores == -1 or num_cores ==", "= neighborhood_density(corpus_context, w, tierdict, tier_type = tier_type, sequence_type = sequence_type,", "to supply progress information during the function settable_attr: string Name", "corpus_context.specifier, sequence_type = corpus_context.sequence_type, max_distance = max_distance) elif algorithm ==", "word a neighbor. 
stop_check : callable, optional Optional function to", "in corpus_context) neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size", "False if w_len < query_len-max_distance: return False return edit_distance(getattr(w, sequence_type),", "cur = 0 al = Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1) for", "return khorsi(getattr(w, sequence_type), getattr(query, sequence_type), freq_base, sequence_type, max_distance) >= max_distance", "the neighbourhood density algorithm else: iterable = ((w,) for w", "max_distance def neighborhood_density_all_words(corpus_context, tierdict, tier_type = None, sequence_type = None,", "faster than neighborhood_density when: n > m * (1 +", "= corpus_context.sequence_type, max_distance = max_distance) elif algorithm == 'phono_edit_distance': is_neighbor", "own neighbour, and this is incorrect # #subtracting 1 here", "output_format = 'spelling', num_cores = -1, settable_attr = None, collapse_homophones", "1 neighborhoods stop_check : callable, optional Optional function to check", "partial(_is_phono_edit_distance_neighbor, specifier = corpus_context.specifier, sequence_type = corpus_context.sequence_type, max_distance = max_distance)", "is not None: call_back('Finding neighbors...') call_back(0,len(corpus_context)) cur = 0 al", "0 al = Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1) for w in", "num_cores, call_back, stop_check, chunk_size = 1) for n in neighbors:", "= Word(**{sequence_type: list(new_query)}) return query_word def parse(word, delimiter): return word.split(delimiter)", "in corpus m: length of query s: size of segment", "neighborhood density of all words in the corpus and adds", "parse(query, trans_delimiter) query_word = Word(**{sequence_type: list(new_query)}) return query_word def parse(word,", "# if the word in the file can't be found", "[str(c) for c in sequence[:i]] + [str(c) for c in", "False, file_type = None, tier_type=None, sequence_type = None, stop_check =", "max_distance = max_distance, collapse_homophones = collapse_homophones) if call_back is not", "= [] query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type) if call_back", "all neighbors of edit distance <= 1 and searches for", "n: number of words in corpus m: length of query", "for w in corpus_context: # if stop_check is not None", "density of a particular word in the corpus. 
Parameters ----------", "CorpusContext Context manager for a corpus algorithm : str The", "= collapse_homophones) if call_back is not None: call_back('Calculating neighborhood densities...')", "None, call_back = None): \"\"\"Find all minimal pairs of the", "sequence[i:]] # insertion yield [str(c) for c in sequence[:i]] +", "function to check whether to gracefully terminate early call_back :", "query_word = Word(**{sequence_type: list(query_word)}) elif tier_type.att_type == 'tier': if file_type", "res = function(w) # results[str(w)] = [getattr(r, output_format) for r", "not in ['#', sequence[i]]: yield [str(c) for c in sequence[:i]]", "in tierdict: for w in tierdict[cand_str]: w_sequence = getattr(w, sequence_type)", "file_type = None, tier_type=None, sequence_type = None, stop_check = None,", "stop_check is not None and stop_check(): return if last_value_removed: tierdict[last_key_removed].append(last_value_removed)", "matches = [m.spelling for m in matches] neighbors = list(set(matches)-set([str(query_sequence)]))", "continue w_sequence = getattr(w, sequence_type) if collapse_homophones and any(getattr(m, sequence_type)", "to account for the fact that words are counted as", "partial(_is_khorsi_neighbor, freq_base = freq_base, sequence_type = corpus_context.sequence_type, max_distance = max_distance)", "due to how #multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0]) return", "matches = [] query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type) if", "file_type=None): if isinstance(query, Word): query_word = query else: if tier_type.att_type", "in matches] neighbors = list(set(matches)-set([str(query_sequence)])) return (len(neighbors), neighbors) def ensure_query_is_word(query,", "sequence_type = None, algorithm = 'edit_distance', max_distance = 1, output_format", "False, stop_check = None, call_back = None): \"\"\"Find all minimal", "neighbors...') call_back(0,len(corpus_context)) cur = 0 al = Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'),", "number of neighbors and the set of neighbor Words. 
\"\"\"", "= score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size = 1) for", "to get insertion at len+1 if str(char) not in ['#',", "return if call_back is not None: cur += 1 if", "freq_base, sequence_type = corpus_context.sequence_type, max_distance = max_distance) for w in", "queried word \"\"\" matches = [] sequence_type = corpus_context.sequence_type query", "yield [str(c) for c in sequence[:i]] + [str(char)] + [str(c)", "= Word(**{sequence_type: new_query}) else: # if file contains spelling try:", "a corpus algorithm : str The algorithm used to determine", "s: size of segment inventory \"\"\" neighbors = list() query", "str(w): last_value_removed = tierdict[last_key_removed].pop(i) break res = find_mutation_minpairs(corpus_context, w, tier_type=tier_type,", "collapse_homophones = False): \"\"\"Generates all neighbors of edit distance <=", "= None, call_back = None): function = partial(find_mutation_minpairs, corpus_context, tier_type=tier_type,", "Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1) for w in corpus_context: w_sequence =", "sequence_type) == w_sequence for m in matches): continue else: #matches.append(str(w_sequence))", "is_neighbor = partial(_is_khorsi_neighbor, freq_base = freq_base, sequence_type = corpus_context.sequence_type, max_distance", "setattr(w.original, corpus_context.attribute.name, res[0]) # for w in corpus_context: # if", "not None: call_back('Calculating neighborhood densities...') call_back(0,len(corpus_context)) cur = 0 results", "Word): query_word = query else: if tier_type.att_type == 'spelling': if", "to how #multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), #corpus_context.attribute.name, n[1][0]) settable_attr.name, n[1][0])", "phono_edit_distance from corpustools.symbolsim.phono_align import Aligner from corpustools.multiproc import filter_mp, score_mp", "corpus_context) neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size= 1)", "freq_base, sequence_type, max_distance) >= max_distance def neighborhood_density_all_words(corpus_context, tierdict, tier_type =", "in corpus: corpus_word_with_td = str(getattr(entry, sequence_type)) if query_with̠td == corpus_word_with_td:", "the corpus new_query = parse(query, trans_delimiter) query_word = Word(**{sequence_type: list(new_query)})", "if str(item) == str(w): last_value_removed = tierdict[last_key_removed].pop(i) break res =", "1 and not force_quadratic: return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type, tierdict,", "None, call_back = None): \"\"\"Calculate the neighborhood density of all", "results[str(w)] = [getattr(r, output_format) for r in res[1]] # setattr(w.original,", "no word found in corpus with the transcription new_query =", "be assigned to \"\"\" function = partial(neighborhood_density, corpus_context, tierdict =", "for w in corpus_context: w_sequence = getattr(w, sequence_type) query_sequence =", "sequence_type, specifier) <= max_distance def _is_khorsi_neighbor(w, query, freq_base, sequence_type, max_distance):", "collapse_homophones) if call_back is not None: call_back('Calculating neighborhood densities...') call_back(0,len(corpus_context))", "in corpus_context: if stop_check is not None and stop_check(): return", "file can't be found in the corpus new_query = parse(query,", "of the less efficient quadratic algorithm even when finding edit", "str(getattr(entry, sequence_type)) if query_with̠td == 
corpus_word_with_td: # if a word", "sequence_type = corpus_context.sequence_type query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type) if", "def find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type = None, num_cores = -1, collapse_homophones", "words. Parameters ---------- corpus_context : CorpusContext Context manager for a", "# that word in the corpus is to be referred", "if num_cores == -1 or num_cores == 1: for w", "score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size= 1) for n in", "all minimal pairs of the query word based only on", "neighbors for {}...'.format(query)) call_back(0,len(corpus_context)) cur = 0 if algorithm ==", "in sequence[:i]] + [str(c) for c in sequence[i+1:]] # deletion", "= list(set(matches)-set([str(query_sequence)])) return (len(neighbors), neighbors) def ensure_query_is_word(query, corpus, sequence_type, tier_type,", "num_cores, call_back, stop_check, chunk_size= 1) for n in neighbors: #Have", "getattr(w, sequence_type) if collapse_homophones and any(getattr(word, sequence_type) == w_sequence for", "= None, num_cores = -1, collapse_homophones = False, stop_check =", "whose minimal pairs to find stop_check : callable or None", "algorithm, max_distance = max_distance, collapse_homophones = collapse_homophones) if call_back is", "and any(getattr(word, sequence_type) == w_sequence for word in neighbors): continue", "function to supply progress information during the function Returns -------", "output_format) for r in res[1]] # setattr(w.original, settable_attr.name, res[0]-1) #", "else: iterable = ((w,) for w in corpus_context) neighbors =", "the neighborhood density of a particular word in the corpus.", "if call_back is not None: call_back('Finding neighbors...') call_back(0,len(corpus_context)) cur =", "the object due to how #multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name,", "# if stop_check is not None and stop_check(): # return", "pairs to find stop_check : callable or None Optional function", "from the queried word to consider a word a neighbor.", "if file contains spelling try: query_word = corpus.corpus.find(query) except KeyError:", "neighbors.append(w) return (len(neighbors), neighbors) def generate_neighbor_candidates(corpus_context, query, sequence_type): sequence =", "== 'phono_edit_distance': is_neighbor = partial(_is_phono_edit_distance_neighbor, specifier = corpus_context.specifier, sequence_type =", "w_sequence) if m[-1][-1]['f'] != 1: continue w_sequence = getattr(w, sequence_type)", "in enumerate(tierdict[last_key_removed]): if str(item) == str(w): last_value_removed = tierdict[last_key_removed].pop(i) break", "0: call_back(cur) if (len(w_sequence) > len(query_sequence)+1 or len(w_sequence) < len(query_sequence)-1):", "for n in neighbors: #Have to look up the key,", "getattr(query, sequence_type), sequence_type, specifier) <= max_distance def _is_khorsi_neighbor(w, query, freq_base,", "partial(find_mutation_minpairs, corpus_context, tier_type=tier_type, collapse_homophones = collapse_homophones) if call_back is not", "minimal pairs to find stop_check : callable or None Optional", "neighborhood densities...') call_back(0,len(corpus_context)) cur = 0 results = dict() last_value_removed", "in res[1]] setattr(w.original, settable_attr.name, res[0]) # for w in corpus_context:", "\"\"\"Calculate the neighborhood density of all words in the corpus", "= ensure_query_is_word(query, corpus_context, 
corpus_context.sequence_type, tier_type) if call_back is not None:", "max_distance = max_distance) elif algorithm == 'khorsi': freq_base = corpus_context.get_frequency_base()", "trans_delimiter.join(candidate) else: cand_str = ''.join(candidate) if cand_str in tierdict: for", "force_quadratic : bool Force use of the less efficient quadratic", "# setattr(w.original, corpus_context.attribute.name, res[0]) else: iterable = ((w,) for w", "else query for entry in corpus: corpus_word_with_td = str(getattr(entry, sequence_type))", "query_len = len(getattr(query, sequence_type)) if w_len > query_len+max_distance: return False", "else: cand_str = ''.join(candidate) if cand_str in tierdict: for w", "continue m = al.make_similarity_matrix(query_sequence, w_sequence) if m[-1][-1]['f'] != 1: continue", "corpus_context : CorpusContext Context manager for a corpus algorithm :", "the less efficient quadratic algorithm even when finding edit distance", "if algorithm == 'edit_distance': is_neighbor = partial(_is_edit_distance_neighbor, sequence_type = corpus_context.sequence_type,", "m: length of query s: size of segment inventory \"\"\"", "sequence] for i in range(len(sequence)): yield [str(c) for c in", "find_mutation_minpairs(corpus_context, query, tier_type = None, collapse_homophones = False, stop_check =", "corpus_context, tier_type=tier_type, collapse_homophones = collapse_homophones) if call_back is not None:", "is_neighbor = partial(_is_edit_distance_neighbor, sequence_type = corpus_context.sequence_type, max_distance = max_distance) elif", "score_mp def _is_edit_distance_neighbor(w, query, sequence_type, max_distance): w_len = len(getattr(w, sequence_type))", "to consider a word a neighbor force_quadratic : bool Force", "then look up the object due to how #multiprocessing pickles", "res[1]] # setattr(w.original, settable_attr.name, res[0]-1) # #the -1 is to", "query_with̠td = '.'.join(query) if '.' not in query else query", "else: neighbors.append(w) return (len(neighbors), neighbors) def generate_neighbor_candidates(corpus_context, query, sequence_type): sequence", "to be referred to. # the following should be run", "# res = function(w) # results[str(w)] = [getattr(r, output_format) for", "the same transcription return entry # that word in the", "sequence_type, tier_type, file_type=file_type) for candidate in generate_neighbor_candidates(corpus_context, query, sequence_type): if", "None, collapse_homophones = False, stop_check = None, call_back = None):", "None: call_back('Finding neighbors...') call_back(0,len(corpus_context)) cur = 0 al = Aligner(features_tf=False,", "The word whose minimal pairs to find stop_check : callable", "[getattr(r, output_format) for r in res[1]] # setattr(w.original, settable_attr.name, res[0]-1)", "num_cores == -1 or num_cores == 1: for w in", "is to be referred to. # the following should be", "neighbour, and this is incorrect # #subtracting 1 here is", "of the number of neighbors and the set of neighbor", "distance <= 1 and searches for them in corpus_context. Will", "w_sequence = getattr(w, corpus_context.sequence_type) last_key_removed = str(w_sequence) for i, item", "m in matches): continue else: #matches.append(str(w_sequence)) matches.append(w) matches = [m.spelling", "to calculate. 
algorithm : str The algorithm used to determine", "key, then look up the object due to how #multiprocessing", "max_distance, collapse_homophones = collapse_homophones) if call_back is not None: call_back('Calculating", "i, item in enumerate(tierdict[last_key_removed]): if str(item) == str(w): last_value_removed =", "distance from the queried word to consider a word a", "= False, file_type = None, tier_type=None, sequence_type = None, stop_check", "'.'.join(query) if '.' not in query else query for entry", "n > m * (1 + s), where n: number", "res[1]#[str(r) for r in res[1]] # setattr(w.original, corpus_context.attribute.name, res[0]) else:", "be found in the corpus new_query = parse(query, trans_delimiter) query_word", "corpus_context.inventory: if str(char) not in ['#', sequence[i]]: yield [str(c) for", "query_sequence = getattr(query, sequence_type) if stop_check is not None and", "\"\"\" function = partial(neighborhood_density, corpus_context, tierdict = tierdict, tier_type =", "corpus is to be referred to. # the following should", "partial(neighborhood_density, corpus_context, tierdict = tierdict, tier_type = tier_type, sequence_type =", "---------- corpus_context : CorpusContext Context manager for a corpus query", "= corpus_context.sequence_type, max_distance = max_distance) elif algorithm == 'khorsi': freq_base", "== str(w): last_value_removed = tierdict[last_key_removed].pop(i) break res = neighborhood_density(corpus_context, w,", "stop_check(): return if call_back is not None: cur += 1", "query else: if tier_type.att_type == 'spelling': if file_type == sequence_type:", "for {}...'.format(query)) call_back(0,len(corpus_context)) cur = 0 if algorithm == 'edit_distance'", "str(char) not in ['#', sequence[i]]: yield [str(c) for c in", "return if last_value_removed: tierdict[last_key_removed].append(last_value_removed) w_sequence = getattr(w, corpus_context.sequence_type) last_key_removed =", "def ensure_query_is_word(query, corpus, sequence_type, tier_type, trans_delimiter='.', file_type=None): if isinstance(query, Word):", "new_query = parse(query, trans_delimiter) query_word = Word(**{sequence_type: new_query}) else: #", "to check whether to gracefully terminate early call_back : callable", "= 1) for n in neighbors: #Have to look up", "in corpus_context: w_sequence = getattr(w, sequence_type) query_sequence = getattr(query, sequence_type)", "trans_delimiter) query_word = Word(**{sequence_type: list(new_query)}) return query_word def parse(word, delimiter):", "sequence_type = None, stop_check = None, call_back = None): \"\"\"Calculate", "less efficient quadratic algorithm even when finding edit distance of", "and stop_check(): # return # cur += 1 # call_back(cur)", "algorithm == 'edit_distance': is_neighbor = partial(_is_edit_distance_neighbor, sequence_type = corpus_context.sequence_type, max_distance", "this is incorrect # #subtracting 1 here is easier than", "for word in neighbors): continue else: neighbors.append(w) return (len(neighbors), neighbors)", "KeyError: # if the word in the file can't be", "file_type=file_type) for candidate in generate_neighbor_candidates(corpus_context, query, sequence_type): if tier_type.att_type ==", "optional Maximum edit distance from the queried word to consider", "tierdict, tier_type = None, sequence_type = None, algorithm = 'edit_distance',", "density to calculate. algorithm : str The algorithm used to", "be referred to. 
# the following should be run if", "1) for n in neighbors: #Have to look up the", "s), where n: number of words in corpus m: length", "< query_len-max_distance: return False return edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type,", "if algorithm == 'edit_distance' and max_distance == 1 and not", "sequence_type), sequence_type, max_distance) <= max_distance def _is_phono_edit_distance_neighbor(w, query, sequence_type, specifier,", "0 if algorithm == 'edit_distance' and max_distance == 1 and", "query_word = Word(**{sequence_type: list(new_query)}) return query_word def parse(word, delimiter): return", "stop_check : callable or None Optional function to check whether", "'.' not in query else query for entry in corpus:", "sequence[:i]] + [str(char)] + [str(c) for c in sequence[i+1:]] #", "object due to how #multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), #corpus_context.attribute.name, n[1][0])", "def find_mutation_minpairs(corpus_context, query, tier_type = None, collapse_homophones = False, stop_check", "setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), #corpus_context.attribute.name, n[1][0]) settable_attr.name, n[1][0]) return results def neighborhood_density(corpus_context, query,", "''.join(candidate) if cand_str in tierdict: for w in tierdict[cand_str]: w_sequence", "= None, collapse_homophones = False, stop_check = None, call_back =", "'edit_distance' and max_distance == 1 and not force_quadratic: return fast_neighborhood_density(corpus_context,", "= partial(neighborhood_density, corpus_context, tierdict = tierdict, tier_type = tier_type, sequence_type", "to supply progress information during the function Returns ------- list", "look up the object due to how #multiprocessing pickles objects", "in the corpus new_query = parse(query, trans_delimiter) query_word = Word(**{sequence_type:", "def neighborhood_density(corpus_context, query, tierdict, algorithm = 'edit_distance', max_distance = 1,", "import partial from corpustools.corpus.classes import Word from corpustools.symbolsim.edit_distance import edit_distance", "collapse_homophones and any(getattr(m, sequence_type) == w_sequence for m in matches):", "import Word from corpustools.symbolsim.edit_distance import edit_distance from corpustools.symbolsim.khorsi import khorsi", ": Word The word whose neighborhood density to calculate. 
algorithm", "res = function(w) # results[str(w)] = res[1]#[str(r) for r in", "the number of neighbors and the set of neighbor Words.", "if cur % 10 == 0: call_back(cur) if (len(w_sequence) >", "transcription return entry # that word in the corpus is", "collapse_homophones = collapse_homophones) if call_back is not None: call_back('Calculating neighborhood", "than neighborhood_density when: n > m * (1 + s),", "list(set(matches)-set([str(query_sequence)])) return (len(neighbors), neighbors) def ensure_query_is_word(query, corpus, sequence_type, tier_type, trans_delimiter='.',", "setattr(w.original, corpus_context.attribute.name, res[0]) else: iterable = ((w,) for w in", "for r in res[1]] # setattr(w.original, settable_attr.name, res[0]-1) # #the", "# if file contains spelling try: query_word = corpus.corpus.find(query) except", "list() query = ensure_query_is_word(query, corpus_context, sequence_type, tier_type, file_type=file_type) for candidate", "return entry # that word in the corpus is to", "the corpus and adds them as attributes of the words.", "settable_attr.name, n[1][0]) return results def neighborhood_density(corpus_context, query, tierdict, algorithm =", "progress information during the function Returns ------- tuple(int, set) Tuple", "query_word def parse(word, delimiter): return word.split(delimiter) if delimiter in word", "call_back(cur) if not is_neighbor(w, query): continue matches.append(w) neighbors = set(matches)-set([query])", "and any(getattr(m, sequence_type) == w_sequence for m in matches): continue", "or None Optional function to supply progress information during the", "neighbors): continue else: neighbors.append(w) return (len(neighbors), neighbors) def generate_neighbor_candidates(corpus_context, query,", "getattr(query, sequence_type), sequence_type, max_distance) <= max_distance def _is_phono_edit_distance_neighbor(w, query, sequence_type,", "callable, optional Optional function to check whether to gracefully terminate", "specifier = corpus_context.specifier, sequence_type = corpus_context.sequence_type, max_distance = max_distance) elif", "for a corpus query : Word The word whose minimal", "item in enumerate(tierdict[last_key_removed]): if str(item) == str(w): last_value_removed = tierdict[last_key_removed].pop(i)", "tier_type = tier_type, sequence_type = sequence_type, algorithm = algorithm, max_distance", "== 0: call_back(cur) if (len(w_sequence) > len(query_sequence)+1 or len(w_sequence) <", "sequence = getattr(query, sequence_type) yield [str(c) for c in sequence]", "progress information during the function Returns ------- list The found", "'tier': if file_type == sequence_type: query_with̠td = '.'.join(query) if '.'", "len(getattr(query, sequence_type)) if w_len > query_len+max_distance: return False if w_len", "= None): \"\"\"Calculate the neighborhood density of a particular word", "be faster than neighborhood_density when: n > m * (1", "1 if cur % 10 == 0: call_back(cur) if (len(w_sequence)", "in neighbors: #Have to look up the key, then look", "== str(w): last_value_removed = tierdict[last_key_removed].pop(i) break res = find_mutation_minpairs(corpus_context, w,", "algorithm = 'edit_distance', max_distance = 1, collapse_homophones = False, force_quadratic", "Word(**{sequence_type: list(query_word)}) elif tier_type.att_type == 'tier': if file_type == sequence_type:", "collapse_homophones=collapse_homophones) if algorithm == 'edit_distance': is_neighbor = partial(_is_edit_distance_neighbor, sequence_type =", "[str(c) for c in sequence[i+1:]] # 
substitution for char in", "'spelling', num_cores = -1, settable_attr = None, collapse_homophones = False,", "setattr(w.original, settable_attr.name, res[0]-1) # #the -1 is to account for", "for c in sequence] for i in range(len(sequence)): yield [str(c)", "break res = find_mutation_minpairs(corpus_context, w, tier_type=tier_type, collapse_homophones = collapse_homophones) results[str(w)]", "max_distance) elif algorithm == 'khorsi': freq_base = corpus_context.get_frequency_base() is_neighbor =", "tierdict = tierdict, tier_type = tier_type, sequence_type = sequence_type, algorithm", "del_penalty=float('inf'), sub_penalty=1) for w in corpus_context: w_sequence = getattr(w, sequence_type)", "words are counted as their own neighbour, and this is", "queried word to consider a word a neighbor. stop_check :", "None and stop_check(): return if call_back is not None: cur", "callable or None Optional function to check whether to gracefully", "stop_check : callable, optional Optional function to check whether to", "edit distance of 1 neighborhoods stop_check : callable, optional Optional", "call_back is not None: cur += 1 if cur %", "r in res[1]] # setattr(w.original, settable_attr.name, res[0]-1) # #the -1", "bool Force use of the less efficient quadratic algorithm even", "w_sequence = getattr(w, sequence_type) if collapse_homophones and any(getattr(m, sequence_type) ==", "Word from corpustools.symbolsim.edit_distance import edit_distance from corpustools.symbolsim.khorsi import khorsi from", "getattr(w, sequence_type) if collapse_homophones and any(getattr(m, sequence_type) == w_sequence for", "how #multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0]) return results def", "+ [str(c) for c in sequence[i:]] # insertion yield [str(c)", "minimal pairs of the query word based only on segment", "else: if tier_type.att_type == 'spelling': if file_type == sequence_type: query_word", "the following should be run if no word found in", "of all words in the corpus and adds them as", "deletions/insertions) Parameters ---------- corpus_context : CorpusContext Context manager for a", "Word(**{sequence_type: new_query}) else: # if file contains spelling try: query_word", "has the same transcription return entry # that word in", "in the file can't be found in the corpus new_query", "continue else: neighbors.append(w) return (len(neighbors), neighbors) def generate_neighbor_candidates(corpus_context, query, sequence_type):", "word in the file can't be found in the corpus", "Aligner from corpustools.multiproc import filter_mp, score_mp def _is_edit_distance_neighbor(w, query, sequence_type,", "them as attributes of the words. 
Parameters ---------- corpus_context :", "sequence_type) if collapse_homophones and any(getattr(m, sequence_type) == w_sequence for m", "= False, stop_check = None, call_back = None): function =", "w, tier_type=tier_type, collapse_homophones = collapse_homophones) results[str(w)] = res[1] setattr(w.original, corpus_context.attribute.name,", "= partial(find_mutation_minpairs, corpus_context, tier_type=tier_type, collapse_homophones = collapse_homophones) if call_back is", "new_query}) else: # if file contains spelling try: query_word =", "collapse_homophones) results[str(w)] = res[1] setattr(w.original, corpus_context.attribute.name, res[0]) # for w", "call_back(0,len(corpus_context)) cur = 0 results = dict() last_value_removed = None", "query, tierdict, algorithm = 'edit_distance', max_distance = 1, collapse_homophones =", "collapse_homophones = False, stop_check = None, call_back = None): \"\"\"Find", "'khorsi': freq_base = corpus_context.get_frequency_base() is_neighbor = partial(_is_khorsi_neighbor, freq_base = freq_base,", "al = Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1) for w in corpus_context:", "neighbors) def generate_neighbor_candidates(corpus_context, query, sequence_type): sequence = getattr(query, sequence_type) yield", "minimal pairs for the queried word \"\"\" matches = []", "+ s), where n: number of words in corpus m:", "setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0]) return results def find_mutation_minpairs(corpus_context, query, tier_type =", "corpus_context.sequence_type) last_key_removed = str(w_sequence) for i, item in enumerate(tierdict[last_key_removed]): if", "tierdict, tier_type = None, num_cores = -1, collapse_homophones = False,", "if call_back is not None: call_back('Finding neighbors for {}...'.format(query)) call_back(0,len(corpus_context))", "query, sequence_type, tier_type, tierdict, file_type=None, trans_delimiter='.', collapse_homophones = False): \"\"\"Generates", "= max_distance, collapse_homophones = collapse_homophones) results[str(w)] = [getattr(r, output_format) for", "corpustools.symbolsim.khorsi import khorsi from corpustools.symbolsim.phono_edit_distance import phono_edit_distance from corpustools.symbolsim.phono_align import", "corpus_word_with_td = str(getattr(entry, sequence_type)) if query_with̠td == corpus_word_with_td: # if", "1 if cur % 10 == 0: call_back(cur) if not", "# if a word in corpus has the same transcription", "in corpus_context. 
Will be faster than neighborhood_density when: n >", "1: continue w_sequence = getattr(w, sequence_type) if collapse_homophones and any(getattr(m,", "matches.append(w) matches = [m.spelling for m in matches] neighbors =", "for the fact that words are counted as their own", "if no word found in corpus with the transcription new_query", "sequence[:i]] + [str(c) for c in sequence[i+1:]] # deletion for", "corpustools.multiproc import filter_mp, score_mp def _is_edit_distance_neighbor(w, query, sequence_type, max_distance): w_len", "= [getattr(r, output_format) for r in res[1]] setattr(w.original, settable_attr.name, res[0])", "and this is incorrect # #subtracting 1 here is easier", "getattr(query, sequence_type), freq_base, sequence_type, max_distance) >= max_distance def neighborhood_density_all_words(corpus_context, tierdict,", "\"\"\"Find all minimal pairs of the query word based only", "neighborhood_density(corpus_context, query, tierdict, algorithm = 'edit_distance', max_distance = 1, collapse_homophones", "file_type=file_type, collapse_homophones=collapse_homophones) if algorithm == 'edit_distance': is_neighbor = partial(_is_edit_distance_neighbor, sequence_type", "max_distance): return phono_edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, specifier) <= max_distance", "during the function Returns ------- tuple(int, set) Tuple of the", "corpus algorithm : str The algorithm used to determine distance", "query.replace(trans_delimiter, '') query_word = Word(**{sequence_type: list(query_word)}) elif tier_type.att_type == 'tier':", "-1, settable_attr = None, collapse_homophones = False, stop_check = None,", "pairs of the query word based only on segment mutations", "sequence_type)) if w_len > query_len+max_distance: return False if w_len <", "a word a neighbor. stop_check : callable, optional Optional function", "c in sequence[:i]] + [str(c) for c in sequence[i+1:]] #", "is to account for the fact that words are counted", "None if num_cores == -1 or num_cores == 1: for", "None and stop_check(): # return # cur += 1 #", "c in sequence[i+1:]] # deletion for char in corpus_context.inventory: if", "algorithm, max_distance = max_distance, collapse_homophones = collapse_homophones) results[str(w)] = [getattr(r,", "set(matches)-set([query]) return (len(neighbors), neighbors) def fast_neighborhood_density(corpus_context, query, sequence_type, tier_type, tierdict,", "function to supply progress information during the function settable_attr: string", "= False, stop_check = None, call_back = None): \"\"\"Calculate the", "corpus_context.sequence_type, tier_type) if call_back is not None: call_back('Finding neighbors for", "None): function = partial(find_mutation_minpairs, corpus_context, tier_type=tier_type, collapse_homophones = collapse_homophones) if", "+ [str(c) for c in sequence[i+1:]] # substitution for char", "Optional function to check whether to gracefully terminate early call_back", "list The found minimal pairs for the queried word \"\"\"", "algorithm = algorithm, max_distance = max_distance, collapse_homophones = collapse_homophones) results[str(w)]", "word in the corpus. 
Parameters ---------- corpus_context : CorpusContext Context", "call_back(cur) # res = function(w) # results[str(w)] = [getattr(r, output_format)", "== w_sequence for m in matches): continue else: #matches.append(str(w_sequence)) matches.append(w)", "query, sequence_type, specifier, max_distance): return phono_edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type,", "== 'khorsi': freq_base = corpus_context.get_frequency_base() is_neighbor = partial(_is_khorsi_neighbor, freq_base =", "information during the function Returns ------- list The found minimal", "not None: call_back('Finding neighbors...') call_back(0,len(corpus_context)) cur = 0 al =", "n[1][0]) settable_attr.name, n[1][0]) return results def neighborhood_density(corpus_context, query, tierdict, algorithm", "try: query_word = corpus.corpus.find(query) except KeyError: # if the word", "def parse(word, delimiter): return word.split(delimiter) if delimiter in word else", "return results def find_mutation_minpairs(corpus_context, query, tier_type = None, collapse_homophones =", "if cand_str in tierdict: for w in tierdict[cand_str]: w_sequence =", "cand_str in tierdict: for w in tierdict[cand_str]: w_sequence = getattr(w,", "call_back, stop_check, chunk_size= 1) for n in neighbors: #Have to", "= False, force_quadratic = False, file_type = None, tier_type=None, sequence_type", "to gracefully terminate early call_back : callable, optional Optional function", "len(query_sequence)-1): continue m = al.make_similarity_matrix(query_sequence, w_sequence) if m[-1][-1]['f'] != 1:", "def neighborhood_density_all_words(corpus_context, tierdict, tier_type = None, sequence_type = None, algorithm", "collapse_homophones = collapse_homophones) results[str(w)] = [getattr(r, output_format) for r in", "for the queried word \"\"\" matches = [] sequence_type =", "tier_type = None, collapse_homophones = False, stop_check = None, call_back", "import Aligner from corpustools.multiproc import filter_mp, score_mp def _is_edit_distance_neighbor(w, query,", "call_back('Calculating neighborhood densities...') call_back(0,len(corpus_context)) cur = 0 results = dict()", "call_back = None): \"\"\"Find all minimal pairs of the query", "= function(w) # results[str(w)] = res[1]#[str(r) for r in res[1]]", "find stop_check : callable or None Optional function to check", "for c in sequence[:]] + [str(char)] # insertion def find_mutation_minpairs_all_words(corpus_context,", "not None and stop_check(): # return # cur += 1", "for m in matches] neighbors = list(set(matches)-set([str(query_sequence)])) return (len(neighbors), neighbors)", "\"\"\" matches = [] sequence_type = corpus_context.sequence_type query = ensure_query_is_word(query,", "corpus_context.attribute.name, res[0]) # for w in corpus_context: # if stop_check", "a word a neighbor force_quadratic : bool Force use of", "in ['#', sequence[i]]: yield [str(c) for c in sequence[:]] +", "when finding edit distance of 1 neighborhoods stop_check : callable,", "-1 is to account for the fact that words are", "None, num_cores = -1, collapse_homophones = False, stop_check = None,", "incorrect # #subtracting 1 here is easier than fixing the", "max_distance) elif algorithm == 'phono_edit_distance': is_neighbor = partial(_is_phono_edit_distance_neighbor, specifier =", "else: # if file contains spelling try: query_word = corpus.corpus.find(query)", "corpus_context.sequence_type, max_distance = max_distance) elif algorithm == 'khorsi': freq_base =", "chunk_size = 1) for n in neighbors: #Have to 
look", "edit_distance from corpustools.symbolsim.khorsi import khorsi from corpustools.symbolsim.phono_edit_distance import phono_edit_distance from", "call_back(cur) # res = function(w) # results[str(w)] = res[1]#[str(r) for", "distance max_distance : float, optional Maximum edit distance from the", "tierdict, file_type=file_type, collapse_homophones=collapse_homophones) if algorithm == 'edit_distance': is_neighbor = partial(_is_edit_distance_neighbor,", "will be assigned to \"\"\" function = partial(neighborhood_density, corpus_context, tierdict", "for w in corpus_context) neighbors = score_mp(iterable, function, num_cores, call_back,", "to consider a word a neighbor. stop_check : callable, optional", "max_distance = max_distance) elif algorithm == 'phono_edit_distance': is_neighbor = partial(_is_phono_edit_distance_neighbor,", "= None): \"\"\"Calculate the neighborhood density of all words in", "word whose neighborhood density to calculate. algorithm : str The", "the word in the file can't be found in the", "= find_mutation_minpairs(corpus_context, w, tier_type=tier_type, collapse_homophones = collapse_homophones) results[str(w)] = res[1]", "corpus_context, corpus_context.sequence_type, tier_type) if call_back is not None: call_back('Finding neighbors", "['#', sequence[i]]: yield [str(c) for c in sequence[:i]] + [str(char)]", "ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1) for w in corpus_context: w_sequence = getattr(w,", "== 1 and not force_quadratic: return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type," ]
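The fragments in the row above are shingles of an edit-distance neighborhood-density module: a word counts as a neighbor of the query when its edit distance is within max_distance, and the fast path for distance 1 generates every deletion, substitution, and insertion candidate and looks each one up in a precomputed tier dictionary instead of scanning the whole corpus. Below is a minimal standalone sketch of that candidate-generation idea; the function names and toy corpus are illustrative, not the corpustools API.

# Hypothetical helpers (not the corpustools API): enumerate every sequence at
# edit distance 1 from a query and count how many occur in a set of
# transcriptions, mirroring the fast_neighborhood_density idea in the row above.
def edit_distance_one_candidates(segments, inventory):
    n = len(segments)
    for i in range(n):                          # deletions
        yield tuple(segments[:i] + segments[i + 1:])
    for i in range(n):                          # substitutions
        for seg in inventory:
            if seg != segments[i]:
                yield tuple(segments[:i] + [seg] + segments[i + 1:])
    for i in range(n + 1):                      # insertions
        for seg in inventory:
            yield tuple(segments[:i] + [seg] + segments[i:])

def neighborhood_density_1(query, corpus_transcriptions, inventory):
    corpus = {tuple(t) for t in corpus_transcriptions}
    neighbors = {c for c in edit_distance_one_candidates(list(query), inventory)
                 if c in corpus and c != tuple(query)}
    return len(neighbors), neighbors

# Toy usage: three of the four corpus words differ from "k ae t" by one edit.
corpus = [["k", "ae", "t"], ["b", "ae", "t"], ["k", "a", "t"], ["k", "ae", "p"]]
print(neighborhood_density_1(["k", "ae", "t"], corpus, ["k", "b", "t", "p", "ae", "a"]))

Generating the roughly len(query) * len(inventory) candidates and hashing them into the corpus is what makes this faster than computing an edit distance against every word once the corpus is large, which is the trade-off the force_quadratic flag in the fragments toggles.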
[ "from django.conf import settings from django.db import migrations class Migration(migrations.Migration):", "Django 2.1.1 on 2018-11-06 17:19 from django.conf import settings from", "2018-11-06 17:19 from django.conf import settings from django.db import migrations", "2.1.1 on 2018-11-06 17:19 from django.conf import settings from django.db", "'0002_auto_20181106_1723'), ] operations = [ migrations.AlterUniqueTogether( name='habit', unique_together={('owner', 'name')}, ),", "on 2018-11-06 17:19 from django.conf import settings from django.db import", "class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('brokenChains', '0002_auto_20181106_1723'), ] operations", "settings from django.db import migrations class Migration(migrations.Migration): dependencies = [", "<gh_stars>1-10 # Generated by Django 2.1.1 on 2018-11-06 17:19 from", "[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('brokenChains', '0002_auto_20181106_1723'), ] operations = [ migrations.AlterUniqueTogether( name='habit',", "import settings from django.db import migrations class Migration(migrations.Migration): dependencies =", "by Django 2.1.1 on 2018-11-06 17:19 from django.conf import settings", "Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('brokenChains', '0002_auto_20181106_1723'), ] operations =", "= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('brokenChains', '0002_auto_20181106_1723'), ] operations = [ migrations.AlterUniqueTogether(", "17:19 from django.conf import settings from django.db import migrations class", "django.db import migrations class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('brokenChains',", "migrations class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('brokenChains', '0002_auto_20181106_1723'), ]", "Generated by Django 2.1.1 on 2018-11-06 17:19 from django.conf import", "django.conf import settings from django.db import migrations class Migration(migrations.Migration): dependencies", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('brokenChains', '0002_auto_20181106_1723'), ] operations = [ migrations.AlterUniqueTogether( name='habit', unique_together={('owner',", "] operations = [ migrations.AlterUniqueTogether( name='habit', unique_together={('owner', 'name')}, ), ]", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "('brokenChains', '0002_auto_20181106_1723'), ] operations = [ migrations.AlterUniqueTogether( name='habit', unique_together={('owner', 'name')},", "# Generated by Django 2.1.1 on 2018-11-06 17:19 from django.conf", "import migrations class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('brokenChains', '0002_auto_20181106_1723'),", "dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('brokenChains', '0002_auto_20181106_1723'), ] operations = [" ]
[ "in range(len(Q)): idx = np.unravel_index(s, dims) policy[idx] = moves[np.argmax(Q[s])] if", "= np.array([[1, 0], [0.5, 0.5], [1,1]]) tri = [left, down,", "= None if np.random.uniform(0, 1) < epsilon: action = np.random.randint(env.action_space.n)", "s = env.reset() done = False while not done: a", "Q = sarsa(env) plot_V(Q, env) plot_Q(Q, env) print_policy(Q, env) plt.show()", "right = np.array([[1, 0], [0.5, 0.5], [1,1]]) tri = [left,", "done: a = choose_abs_greedy_action(s, Q, epsilon) s_, r, done, _", "[0.5, 0.5], [1,1]]) tri = [left, down, right, up] pos", "color=cmap(.0))) plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0), horizontalalignment='center', verticalalignment='center') continue for a in", "3-x]), color=cmap(Q[s][a]))) plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]), horizontalalignment='center', verticalalignment='center', fontsize=9, fontweight=('bold' if", "u'·' print('\\n'.join([''.join([u'{:2}'.format(item) for item in row]) for row in policy]))", "Q using Q learning Q[s, a] = Q[s, a] +", "policy = np.chararray(dims, unicode=True) policy[:] = ' ' for s", "import matplotlib.pyplot as plt def print_policy(Q, env): \"\"\" This is", "using sarsa Q[s, a] = Q[s, a] + alpha *", "r, done, _ = env.step(a) a_ = choose_abs_greedy_action(s_, Q, epsilon)", "the sarsa algorithm # This is some starting point performing", "_ = env.step(a) #update Q using Q learning Q[s, a]", "= idx if env.desc[idx] in ['H', 'G']: ax.add_patch(patches.Rectangle((y, 3-x), 1,", "gamma=0.9, epsilon=0.1, num_ep=int(1e4)): #Q = np.zeros((env.observation_space.n, env.action_space.n)) Q = np.random.rand(env.observation_space.n,", "map_name=\"8x8\") print(\"Running sarsa...\") Q = sarsa(env) plot_V(Q, env) plot_Q(Q, env)", "+ alpha * (r + (gamma * Q[s_,a_]) - Q[s,a])", "env=gym.make('FrozenLake-v0') #env=gym.make('FrozenLake-v0', is_slippery=False) #env=gym.make('FrozenLake-v0', map_name=\"8x8\") print(\"Running sarsa...\") Q = sarsa(env)", "for s in range(len(Q)): idx = np.unravel_index(s, dims) policy[idx] =", "np.random.randint(env.action_space.n) else: action = np.argmax(Q[state,:]) return action def max_action_state(state, Q):", "dims) policy[idx] = moves[np.argmax(Q[s])] if env.desc[idx] in ['H', 'G']: policy[idx]", "fig = plt.figure() ax = fig.gca() if not hasattr(env, 'desc'):", "qlearning algorithm for i in range(num_ep): s = env.reset() done", "Q[s, a] = Q[s, a] + alpha * (r+ (", "plt.imshow(V, origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=plt.cm.RdYlGn, interpolation='none') for x, y", "idx = np.unravel_index(s, dims) V[idx] = np.max(Q[s]) if env.desc[idx] in", "a_ = choose_abs_greedy_action(s_, Q, epsilon) #update Q using sarsa Q[s,", "dims = env.desc.shape V = np.zeros(dims) for s in range(len(Q)):", "plot_V(Q, env) plot_Q(Q, env) print_policy(Q, env) plt.show() print(\"Running qlearning\") Q", "= np.random.randint(env.action_space.n) else: action = np.argmax(Q[state,:]) return action def max_action_state(state,", "hasattr(env, 'desc'): env = env.env dims = env.desc.shape up =", "V[idx] = np.max(Q[s]) if env.desc[idx] in ['H', 'G']: V[idx] =", "[0,1]]) right = np.array([[1, 0], [0.5, 0.5], [1,1]]) tri =", "# TODO: implement the sarsa algorithm # This is some", "Q[s][a] == np.max(Q[s]) else 'normal')) plt.xticks([]) plt.yticks([]) def choose_abs_greedy_action(state, Q,", "#env=gym.make('FrozenLake-v0', map_name=\"8x8\") print(\"Running sarsa...\") Q = sarsa(env) plot_V(Q, env) plot_Q(Q,", "color='black', linestyle='-', linewidth=2) for s in 
range(len(Q)): idx = np.unravel_index(s,", "as np from itertools import product import matplotlib.pyplot as plt", "[0.8, 0.5], [0.5, 0.8]] cmap = plt.cm.RdYlGn norm = colors.Normalize(vmin=.0,vmax=.6)", "env = env.env dims = env.desc.shape policy = np.chararray(dims, unicode=True)", "policy])) def plot_V(Q, env): \"\"\" This is a helper function", "in ['H', 'G']: policy[idx] = u'·' print('\\n'.join([''.join([u'{:2}'.format(item) for item in", "dims = env.desc.shape up = np.array([[0, 1], [0.5, 0.5], [1,1]])", "np.array([y, 3-x]), color=cmap(Q[s][a]))) plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]), horizontalalignment='center', verticalalignment='center', fontsize=9, fontweight=('bold'", "to plot the Q function \"\"\" from matplotlib import colors,", "dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]), horizontalalignment='center', verticalalignment='center', fontsize=9, fontweight=('bold' if Q[s][a] == np.max(Q[s])", "is some starting point performing random walks in the environment:", "idx = np.unravel_index(s, dims) policy[idx] = moves[np.argmax(Q[s])] if env.desc[idx] in", "u'↓',u'→', u'↑'] if not hasattr(env, 'desc'): env = env.env dims", "matplotlib.pyplot as plt def print_policy(Q, env): \"\"\" This is a", "x, y = idx if env.desc[idx] in ['H', 'G']: ax.add_patch(patches.Rectangle((y,", "0], [0.5, 0.5], [0,1]]) right = np.array([[1, 0], [0.5, 0.5],", "3-x), 1, 1, color=cmap(.0))) plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0), horizontalalignment='center', verticalalignment='center') continue", "np.array([[1, 0], [0.5, 0.5], [1,1]]) tri = [left, down, right,", "if env.desc[idx] in ['H', 'G']: policy[idx] = u'·' print('\\n'.join([''.join([u'{:2}'.format(item) for", "= False a = choose_abs_greedy_action(s, Q, epsilon) while not done:", "a] + alpha * (r + (gamma * Q[s_,a_]) -", "= env.desc.shape policy = np.chararray(dims, unicode=True) policy[:] = ' '", "to plot the state values from the Q function\"\"\" fig", "TODO: implement the sarsa algorithm # This is some starting", "Q, epsilon) while not done: s_, r, done, _ =", "1], [0.5, 0.5], [1,1]]) down = np.array([[0, 0], [0.5, 0.5],", "s_ a = a_ return Q def qlearning(env, alpha=0.1, gamma=0.9,", "a] = Q[s, a] + alpha * (r + (gamma", "epsilon) s_, r, done, _ = env.step(a) #update Q using", "in ['H', 'G']: ax.add_patch(patches.Rectangle((y, 3-x), 1, 1, color=cmap(.0))) plt.text(y+0.5, dims[0]-x-0.5,", "= np.array([[0, 0], [0.5, 0.5], [1,0]]) left = np.array([[0, 0],", "V[idx] = 0. 
plt.imshow(V, origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=plt.cm.RdYlGn, interpolation='none')", "= np.argmax(Q[state,:]) return action def max_action_state(state, Q): action = np.argmax(Q[state,:])", "not hasattr(env, 'desc'): env = env.env dims = env.desc.shape V", "choose_abs_greedy_action(state, Q, epsilon): action = None if np.random.uniform(0, 1) <", "for item in row]) for row in policy])) def plot_V(Q,", "s in range(len(Q)): idx = np.unravel_index(s, dims) V[idx] = np.max(Q[s])", "plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]), horizontalalignment='center', verticalalignment='center') plt.xticks([]) plt.yticks([]) def plot_Q(Q, env):", "vmin=.0, vmax=.6, cmap=plt.cm.RdYlGn, interpolation='none') for x, y in product(range(dims[0]), range(dims[1])):", "environment: for i in range(num_ep): s = env.reset() done =", "learning Q[s, a] = Q[s, a] + alpha * (r+", "plt.figure() if not hasattr(env, 'desc'): env = env.env dims =", "Q[s,a] ) s = s_ return Q env=gym.make('FrozenLake-v0') #env=gym.make('FrozenLake-v0', is_slippery=False)", "= env.env dims = env.desc.shape up = np.array([[0, 1], [0.5,", "action = np.random.randint(env.action_space.n) else: action = np.argmax(Q[state,:]) return action def", "sarsa algorithm # This is some starting point performing random", "sarsa(env) plot_V(Q, env) plot_Q(Q, env) print_policy(Q, env) plt.show() print(\"Running qlearning\")", "qlearning\") Q = qlearning(env) plot_V(Q, env) plot_Q(Q, env) print_policy(Q, env)", "state values from the Q function\"\"\" fig = plt.figure() if", "Q = qlearning(env) plot_V(Q, env) plot_Q(Q, env) print_policy(Q, env) plt.show()", "vmax=.6, cmap=plt.cm.RdYlGn, interpolation='none') for x, y in product(range(dims[0]), range(dims[1])): plt.text(y+0.5,", "a = choose_abs_greedy_action(s, Q, epsilon) s_, r, done, _ =", "is a helper function to plot the state values from", "def sarsa(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)): #Q = np.zeros((env.observation_space.n, env.action_space.n))", "if not hasattr(env, 'desc'): env = env.env dims = env.desc.shape", "while not done: a = choose_abs_greedy_action(s, Q, epsilon) s_, r,", "env.desc.shape policy = np.chararray(dims, unicode=True) policy[:] = ' ' for", "print_policy(Q, env) plt.show() print(\"Running qlearning\") Q = qlearning(env) plot_V(Q, env)", "0.5], [1,0]]) left = np.array([[0, 0], [0.5, 0.5], [0,1]]) right", "Q[state, action] def sarsa(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)): #Q =", "is a helper function to plot the Q function \"\"\"", "from itertools import product import matplotlib.pyplot as plt def print_policy(Q,", "not done: a = choose_abs_greedy_action(s, Q, epsilon) s_, r, done,", "0.8]] cmap = plt.cm.RdYlGn norm = colors.Normalize(vmin=.0,vmax=.6) ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]],", "['H', 'G']: ax.add_patch(patches.Rectangle((y, 3-x), 1, 1, color=cmap(.0))) plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0),", "function to plot the Q function \"\"\" from matplotlib import", "s in range(len(Q)): idx = np.unravel_index(s, dims) policy[idx] = moves[np.argmax(Q[s])]", "#update Q using Q learning Q[s, a] = Q[s, a]", "range(len(Q)): idx = np.unravel_index(s, dims) x, y = idx if", "for i in range(num_ep): s = env.reset() done = False", "= plt.cm.RdYlGn norm = colors.Normalize(vmin=.0,vmax=.6) ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6,", "for s in range(len(Q)): idx = np.unravel_index(s, dims) x, y", "import colors, patches 
fig = plt.figure() ax = fig.gca() if", "cmap=cmap) ax.grid(which='major', color='black', linestyle='-', linewidth=2) for s in range(len(Q)): idx", "Q function \"\"\" from matplotlib import colors, patches fig =", "for x, y in product(range(dims[0]), range(dims[1])): plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]), horizontalalignment='center',", "in product(range(dims[0]), range(dims[1])): plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]), horizontalalignment='center', verticalalignment='center') plt.xticks([]) plt.yticks([])", "0], [0.5, 0.5], [1,0]]) left = np.array([[0, 0], [0.5, 0.5],", "0.5], [0,1]]) right = np.array([[1, 0], [0.5, 0.5], [1,1]]) tri", "def plot_V(Q, env): \"\"\" This is a helper function to", "[1,1]]) down = np.array([[0, 0], [0.5, 0.5], [1,0]]) left =", "[0.5, 0.5], [0,1]]) right = np.array([[1, 0], [0.5, 0.5], [1,1]])", "[[0.2, 0.5], [0.5, 0.2], [0.8, 0.5], [0.5, 0.8]] cmap =", "= np.argmax(Q[state,:]) return Q[state, action] def sarsa(env, alpha=0.1, gamma=0.9, epsilon=0.1,", "max_action_state(s_, Q)) - Q[s,a] ) s = s_ return Q", "Q[s_,a_]) - Q[s,a]) s = s_ a = a_ return", "action = np.argmax(Q[state,:]) return Q[state, action] def sarsa(env, alpha=0.1, gamma=0.9,", "'desc'): env = env.env dims = env.desc.shape policy = np.chararray(dims,", "done, _ = env.step(a) a_ = choose_abs_greedy_action(s_, Q, epsilon) #update", "'normal')) plt.xticks([]) plt.yticks([]) def choose_abs_greedy_action(state, Q, epsilon): action = None", "# This is some starting point performing random walks in", "Q using sarsa Q[s, a] = Q[s, a] + alpha", "idx = np.unravel_index(s, dims) x, y = idx if env.desc[idx]", "plt.cm.RdYlGn norm = colors.Normalize(vmin=.0,vmax=.6) ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=cmap)", "Q = np.random.rand(env.observation_space.n, env.action_space.n) # TODO: implement the qlearning algorithm", "return Q[state, action] def sarsa(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)): #Q", "values from the Q function\"\"\" fig = plt.figure() if not", "env) plt.show() print(\"Running qlearning\") Q = qlearning(env) plot_V(Q, env) plot_Q(Q,", "qlearning(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)): #Q = np.zeros((env.observation_space.n, env.action_space.n)) Q", "= choose_abs_greedy_action(s, Q, epsilon) while not done: s_, r, done,", "= env.desc.shape V = np.zeros(dims) for s in range(len(Q)): idx", "np.chararray(dims, unicode=True) policy[:] = ' ' for s in range(len(Q)):", "is a helper function to print a nice policy from", "colors.Normalize(vmin=.0,vmax=.6) ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=cmap) ax.grid(which='major', color='black', linestyle='-',", "np.random.uniform(0, 1) < epsilon: action = np.random.randint(env.action_space.n) else: action =", "['H', 'G']: policy[idx] = u'·' print('\\n'.join([''.join([u'{:2}'.format(item) for item in row])", "np.argmax(Q[state,:]) return action def max_action_state(state, Q): action = np.argmax(Q[state,:]) return", "a] = Q[s, a] + alpha * (r+ ( gamma", "[0.5, 0.8]] cmap = plt.cm.RdYlGn norm = colors.Normalize(vmin=.0,vmax=.6) ax.imshow(np.zeros(dims), origin='upper',", "s_ return Q env=gym.make('FrozenLake-v0') #env=gym.make('FrozenLake-v0', is_slippery=False) #env=gym.make('FrozenLake-v0', map_name=\"8x8\") print(\"Running sarsa...\")", "the Q function \"\"\" from matplotlib import colors, patches fig", "= np.zeros((env.observation_space.n, env.action_space.n)) Q = 
np.random.rand(env.observation_space.n, env.action_space.n) # TODO: implement", "env): \"\"\" This is a helper function to print a", "* max_action_state(s_, Q)) - Q[s,a] ) s = s_ return", "action] def sarsa(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)): #Q = np.zeros((env.observation_space.n,", "'desc'): env = env.env dims = env.desc.shape V = np.zeros(dims)", "env.env dims = env.desc.shape policy = np.chararray(dims, unicode=True) policy[:] =", "unicode=True) policy[:] = ' ' for s in range(len(Q)): idx", "if np.random.uniform(0, 1) < epsilon: action = np.random.randint(env.action_space.n) else: action", "in policy])) def plot_V(Q, env): \"\"\" This is a helper", "moves[np.argmax(Q[s])] if env.desc[idx] in ['H', 'G']: policy[idx] = u'·' print('\\n'.join([''.join([u'{:2}'.format(item)", "fig = plt.figure() if not hasattr(env, 'desc'): env = env.env", "policy from the Q function\"\"\" moves = [u'←', u'↓',u'→', u'↑']", "for row in policy])) def plot_V(Q, env): \"\"\" This is", "extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=cmap) ax.grid(which='major', color='black', linestyle='-', linewidth=2) for s", "a = a_ return Q def qlearning(env, alpha=0.1, gamma=0.9, epsilon=0.1,", "s = env.reset() done = False a = choose_abs_greedy_action(s, Q,", "= env.env dims = env.desc.shape policy = np.chararray(dims, unicode=True) policy[:]", "= env.reset() done = False while not done: a =", "the qlearning algorithm for i in range(num_ep): s = env.reset()", "print_policy(Q, env): \"\"\" This is a helper function to print", "np.unravel_index(s, dims) policy[idx] = moves[np.argmax(Q[s])] if env.desc[idx] in ['H', 'G']:", "= np.zeros(dims) for s in range(len(Q)): idx = np.unravel_index(s, dims)", "plt.show() print(\"Running qlearning\") Q = qlearning(env) plot_V(Q, env) plot_Q(Q, env)", "left = np.array([[0, 0], [0.5, 0.5], [0,1]]) right = np.array([[1,", "[0.5, 0.5], [1,0]]) left = np.array([[0, 0], [0.5, 0.5], [0,1]])", "0. 
plt.imshow(V, origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=plt.cm.RdYlGn, interpolation='none') for x,", "helper function to plot the state values from the Q", "= plt.figure() if not hasattr(env, 'desc'): env = env.env dims", "right, up] pos = [[0.2, 0.5], [0.5, 0.2], [0.8, 0.5],", "range(dims[1])): plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]), horizontalalignment='center', verticalalignment='center') plt.xticks([]) plt.yticks([]) def plot_Q(Q,", "0.2], [0.8, 0.5], [0.5, 0.8]] cmap = plt.cm.RdYlGn norm =", "= fig.gca() if not hasattr(env, 'desc'): env = env.env dims", "not hasattr(env, 'desc'): env = env.env dims = env.desc.shape up", "matplotlib import colors, patches fig = plt.figure() ax = fig.gca()", "= [left, down, right, up] pos = [[0.2, 0.5], [0.5,", "action = np.argmax(Q[state,:]) return action def max_action_state(state, Q): action =", "gym import numpy as np from itertools import product import", "env.desc.shape up = np.array([[0, 1], [0.5, 0.5], [1,1]]) down =", "choose_abs_greedy_action(s, Q, epsilon) while not done: s_, r, done, _", "env.step(a) #update Q using Q learning Q[s, a] = Q[s,", "function to print a nice policy from the Q function\"\"\"", "pos = [[0.2, 0.5], [0.5, 0.2], [0.8, 0.5], [0.5, 0.8]]", "'G']: ax.add_patch(patches.Rectangle((y, 3-x), 1, 1, color=cmap(.0))) plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0), horizontalalignment='center',", "np.max(Q[s]) else 'normal')) plt.xticks([]) plt.yticks([]) def choose_abs_greedy_action(state, Q, epsilon): action", "epsilon=0.1, num_ep=int(1e4)): #Q = np.zeros((env.observation_space.n, env.action_space.n)) Q = np.random.rand(env.observation_space.n, env.action_space.n)", "def print_policy(Q, env): \"\"\" This is a helper function to", "ax.grid(which='major', color='black', linestyle='-', linewidth=2) for s in range(len(Q)): idx =", "implement the qlearning algorithm for i in range(num_ep): s =", "0.5], [1,1]]) down = np.array([[0, 0], [0.5, 0.5], [1,0]]) left", "' ' for s in range(len(Q)): idx = np.unravel_index(s, dims)", "Q learning Q[s, a] = Q[s, a] + alpha *", "def qlearning(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)): #Q = np.zeros((env.observation_space.n, env.action_space.n))", "+ alpha * (r+ ( gamma * max_action_state(s_, Q)) -", "plot_Q(Q, env) print_policy(Q, env) plt.show() print(\"Running qlearning\") Q = qlearning(env)", "False while not done: a = choose_abs_greedy_action(s, Q, epsilon) s_,", "helper function to plot the Q function \"\"\" from matplotlib", "env) print_policy(Q, env) plt.show() print(\"Running qlearning\") Q = qlearning(env) plot_V(Q,", "= env.step(a) a_ = choose_abs_greedy_action(s_, Q, epsilon) #update Q using", "dims) x, y = idx if env.desc[idx] in ['H', 'G']:", "in range(len(Q)): idx = np.unravel_index(s, dims) V[idx] = np.max(Q[s]) if", "down, right, up] pos = [[0.2, 0.5], [0.5, 0.2], [0.8,", "s = s_ return Q env=gym.make('FrozenLake-v0') #env=gym.make('FrozenLake-v0', is_slippery=False) #env=gym.make('FrozenLake-v0', map_name=\"8x8\")", "plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]), horizontalalignment='center', verticalalignment='center', fontsize=9, fontweight=('bold' if Q[s][a] ==", "row]) for row in policy])) def plot_V(Q, env): \"\"\" This", "policy[:] = ' ' for s in range(len(Q)): idx =", "done = False a = choose_abs_greedy_action(s, Q, epsilon) while not", "norm = colors.Normalize(vmin=.0,vmax=.6) ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=cmap) 
ax.grid(which='major',", "1, 1, color=cmap(.0))) plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0), horizontalalignment='center', verticalalignment='center') continue for", "env.reset() done = False while not done: a = choose_abs_greedy_action(s,", "in range(num_ep): s = env.reset() done = False a =", ") s = s_ return Q env=gym.make('FrozenLake-v0') #env=gym.make('FrozenLake-v0', is_slippery=False) #env=gym.make('FrozenLake-v0',", "verticalalignment='center', fontsize=9, fontweight=('bold' if Q[s][a] == np.max(Q[s]) else 'normal')) plt.xticks([])", "== np.max(Q[s]) else 'normal')) plt.xticks([]) plt.yticks([]) def choose_abs_greedy_action(state, Q, epsilon):", "helper function to print a nice policy from the Q", "policy[idx] = u'·' print('\\n'.join([''.join([u'{:2}'.format(item) for item in row]) for row", "< epsilon: action = np.random.randint(env.action_space.n) else: action = np.argmax(Q[state,:]) return", "= colors.Normalize(vmin=.0,vmax=.6) ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=cmap) ax.grid(which='major', color='black',", "not done: s_, r, done, _ = env.step(a) a_ =", "np.zeros(dims) for s in range(len(Q)): idx = np.unravel_index(s, dims) V[idx]", "for a in range(len(tri)): ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3-x]), color=cmap(Q[s][a]))) plt.text(y+pos[a][0],", "plt.yticks([]) def plot_Q(Q, env): \"\"\" This is a helper function", "np.random.rand(env.observation_space.n, env.action_space.n) # TODO: implement the sarsa algorithm # This", "= s_ return Q env=gym.make('FrozenLake-v0') #env=gym.make('FrozenLake-v0', is_slippery=False) #env=gym.make('FrozenLake-v0', map_name=\"8x8\") print(\"Running", "print(\"Running qlearning\") Q = qlearning(env) plot_V(Q, env) plot_Q(Q, env) print_policy(Q,", "[u'←', u'↓',u'→', u'↑'] if not hasattr(env, 'desc'): env = env.env", "a in range(len(tri)): ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3-x]), color=cmap(Q[s][a]))) plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1],", "ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=cmap) ax.grid(which='major', color='black', linestyle='-', linewidth=2)", "[1,0]]) left = np.array([[0, 0], [0.5, 0.5], [0,1]]) right =", "Q, epsilon) s_, r, done, _ = env.step(a) #update Q", "function\"\"\" fig = plt.figure() if not hasattr(env, 'desc'): env =", "horizontalalignment='center', verticalalignment='center') plt.xticks([]) plt.yticks([]) def plot_Q(Q, env): \"\"\" This is", "some starting point performing random walks in the environment: for", "* (r+ ( gamma * max_action_state(s_, Q)) - Q[s,a] )", "i in range(num_ep): s = env.reset() done = False a", "[left, down, right, up] pos = [[0.2, 0.5], [0.5, 0.2],", "s_, r, done, _ = env.step(a) a_ = choose_abs_greedy_action(s_, Q,", "= choose_abs_greedy_action(s_, Q, epsilon) #update Q using sarsa Q[s, a]", "a helper function to plot the state values from the", "ax = fig.gca() if not hasattr(env, 'desc'): env = env.env", "= moves[np.argmax(Q[s])] if env.desc[idx] in ['H', 'G']: policy[idx] = u'·'", "+ np.array([y, 3-x]), color=cmap(Q[s][a]))) plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]), horizontalalignment='center', verticalalignment='center', fontsize=9,", "alpha * (r + (gamma * Q[s_,a_]) - Q[s,a]) s", "return Q def qlearning(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)): #Q =", "+ (gamma * Q[s_,a_]) - Q[s,a]) s = s_ a", "choose_abs_greedy_action(s_, Q, epsilon) #update Q using sarsa Q[s, a] =", "continue for a in 
range(len(tri)): ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3-x]), color=cmap(Q[s][a])))", "= np.random.rand(env.observation_space.n, env.action_space.n) # TODO: implement the sarsa algorithm #", "env = env.env dims = env.desc.shape V = np.zeros(dims) for", "function\"\"\" moves = [u'←', u'↓',u'→', u'↑'] if not hasattr(env, 'desc'):", "done: s_, r, done, _ = env.step(a) a_ = choose_abs_greedy_action(s_,", "sarsa...\") Q = sarsa(env) plot_V(Q, env) plot_Q(Q, env) print_policy(Q, env)", "print a nice policy from the Q function\"\"\" moves =", "np.unravel_index(s, dims) V[idx] = np.max(Q[s]) if env.desc[idx] in ['H', 'G']:", "starting point performing random walks in the environment: for i", "plot_V(Q, env): \"\"\" This is a helper function to plot", "_ = env.step(a) a_ = choose_abs_greedy_action(s_, Q, epsilon) #update Q", "import numpy as np from itertools import product import matplotlib.pyplot", "random walks in the environment: for i in range(num_ep): s", "1, color=cmap(.0))) plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0), horizontalalignment='center', verticalalignment='center') continue for a", "[0.5, 0.2], [0.8, 0.5], [0.5, 0.8]] cmap = plt.cm.RdYlGn norm", "r, done, _ = env.step(a) #update Q using Q learning", "- Q[s,a] ) s = s_ return Q env=gym.make('FrozenLake-v0') #env=gym.make('FrozenLake-v0',", "a = choose_abs_greedy_action(s, Q, epsilon) while not done: s_, r,", "s_, r, done, _ = env.step(a) #update Q using Q", "from matplotlib import colors, patches fig = plt.figure() ax =", "the Q function\"\"\" fig = plt.figure() if not hasattr(env, 'desc'):", "horizontalalignment='center', verticalalignment='center') continue for a in range(len(tri)): ax.add_patch(patches.Polygon(tri[a] + np.array([y,", "0], [0.5, 0.5], [1,1]]) tri = [left, down, right, up]", "num_ep=int(1e4)): #Q = np.zeros((env.observation_space.n, env.action_space.n)) Q = np.random.rand(env.observation_space.n, env.action_space.n) #", "is_slippery=False) #env=gym.make('FrozenLake-v0', map_name=\"8x8\") print(\"Running sarsa...\") Q = sarsa(env) plot_V(Q, env)", "'desc'): env = env.env dims = env.desc.shape up = np.array([[0,", "[0.5, 0.5], [1,1]]) down = np.array([[0, 0], [0.5, 0.5], [1,0]])", "= [u'←', u'↓',u'→', u'↑'] if not hasattr(env, 'desc'): env =", "vmin=.0, vmax=.6, cmap=cmap) ax.grid(which='major', color='black', linestyle='-', linewidth=2) for s in", "not hasattr(env, 'desc'): env = env.env dims = env.desc.shape policy", "a helper function to print a nice policy from the", "env.reset() done = False a = choose_abs_greedy_action(s, Q, epsilon) while", "itertools import product import matplotlib.pyplot as plt def print_policy(Q, env):", "as plt def print_policy(Q, env): \"\"\" This is a helper", "horizontalalignment='center', verticalalignment='center', fontsize=9, fontweight=('bold' if Q[s][a] == np.max(Q[s]) else 'normal'))", "a nice policy from the Q function\"\"\" moves = [u'←',", "sarsa(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)): #Q = np.zeros((env.observation_space.n, env.action_space.n)) Q", "linestyle='-', linewidth=2) for s in range(len(Q)): idx = np.unravel_index(s, dims)", "print(\"Running sarsa...\") Q = sarsa(env) plot_V(Q, env) plot_Q(Q, env) print_policy(Q,", "' for s in range(len(Q)): idx = np.unravel_index(s, dims) policy[idx]", "x, y in product(range(dims[0]), range(dims[1])): plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]), horizontalalignment='center', verticalalignment='center')", "action = None if np.random.uniform(0, 1) < epsilon: action =", "#update Q using 
sarsa Q[s, a] = Q[s, a] +", "in row]) for row in policy])) def plot_V(Q, env): \"\"\"", "epsilon): action = None if np.random.uniform(0, 1) < epsilon: action", "linewidth=2) for s in range(len(Q)): idx = np.unravel_index(s, dims) x,", "Q env=gym.make('FrozenLake-v0') #env=gym.make('FrozenLake-v0', is_slippery=False) #env=gym.make('FrozenLake-v0', map_name=\"8x8\") print(\"Running sarsa...\") Q =", "colors, patches fig = plt.figure() ax = fig.gca() if not", "product(range(dims[0]), range(dims[1])): plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]), horizontalalignment='center', verticalalignment='center') plt.xticks([]) plt.yticks([]) def", "dims[0]-x-0.5, '{:.3f}'.format(V[x,y]), horizontalalignment='center', verticalalignment='center') plt.xticks([]) plt.yticks([]) def plot_Q(Q, env): \"\"\"", "\"\"\" This is a helper function to plot the Q", "color=cmap(Q[s][a]))) plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]), horizontalalignment='center', verticalalignment='center', fontsize=9, fontweight=('bold' if Q[s][a]", "Q, epsilon) #update Q using sarsa Q[s, a] = Q[s,", "Q)) - Q[s,a] ) s = s_ return Q env=gym.make('FrozenLake-v0')", "row in policy])) def plot_V(Q, env): \"\"\" This is a", "interpolation='none') for x, y in product(range(dims[0]), range(dims[1])): plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]),", "up] pos = [[0.2, 0.5], [0.5, 0.2], [0.8, 0.5], [0.5,", "in range(len(Q)): idx = np.unravel_index(s, dims) x, y = idx", "plt.yticks([]) def choose_abs_greedy_action(state, Q, epsilon): action = None if np.random.uniform(0,", "np.array([[0, 1], [0.5, 0.5], [1,1]]) down = np.array([[0, 0], [0.5,", "np.random.rand(env.observation_space.n, env.action_space.n) # TODO: implement the qlearning algorithm for i", "import product import matplotlib.pyplot as plt def print_policy(Q, env): \"\"\"", "point performing random walks in the environment: for i in", "def choose_abs_greedy_action(state, Q, epsilon): action = None if np.random.uniform(0, 1)", "if Q[s][a] == np.max(Q[s]) else 'normal')) plt.xticks([]) plt.yticks([]) def choose_abs_greedy_action(state,", "policy[idx] = moves[np.argmax(Q[s])] if env.desc[idx] in ['H', 'G']: policy[idx] =", "'{:.3f}'.format(V[x,y]), horizontalalignment='center', verticalalignment='center') plt.xticks([]) plt.yticks([]) def plot_Q(Q, env): \"\"\" This", "np.array([[0, 0], [0.5, 0.5], [1,0]]) left = np.array([[0, 0], [0.5,", "= np.unravel_index(s, dims) V[idx] = np.max(Q[s]) if env.desc[idx] in ['H',", "* Q[s_,a_]) - Q[s,a]) s = s_ a = a_", "env): \"\"\" This is a helper function to plot the", "This is a helper function to print a nice policy", "cmap = plt.cm.RdYlGn norm = colors.Normalize(vmin=.0,vmax=.6) ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0,", "This is a helper function to plot the state values", "- Q[s,a]) s = s_ a = a_ return Q", "fontweight=('bold' if Q[s][a] == np.max(Q[s]) else 'normal')) plt.xticks([]) plt.yticks([]) def", "nice policy from the Q function\"\"\" moves = [u'←', u'↓',u'→',", "using Q learning Q[s, a] = Q[s, a] + alpha", "\"\"\" This is a helper function to print a nice", "epsilon) #update Q using sarsa Q[s, a] = Q[s, a]", "* (r + (gamma * Q[s_,a_]) - Q[s,a]) s =", "= np.random.rand(env.observation_space.n, env.action_space.n) # TODO: implement the qlearning algorithm for", "vmax=.6, cmap=cmap) ax.grid(which='major', color='black', linestyle='-', linewidth=2) for s in range(len(Q)):", "Q[s, a] + alpha * (r + (gamma * Q[s_,a_])", "s in range(len(Q)): idx = 
np.unravel_index(s, dims) x, y =", "range(num_ep): s = env.reset() done = False while not done:", "0.5], [0.5, 0.8]] cmap = plt.cm.RdYlGn norm = colors.Normalize(vmin=.0,vmax=.6) ax.imshow(np.zeros(dims),", "= np.unravel_index(s, dims) policy[idx] = moves[np.argmax(Q[s])] if env.desc[idx] in ['H',", "(r+ ( gamma * max_action_state(s_, Q)) - Q[s,a] ) s", "hasattr(env, 'desc'): env = env.env dims = env.desc.shape policy =", "alpha * (r+ ( gamma * max_action_state(s_, Q)) - Q[s,a]", "walks in the environment: for i in range(num_ep): s =", "= sarsa(env) plot_V(Q, env) plot_Q(Q, env) print_policy(Q, env) plt.show() print(\"Running", "in range(len(tri)): ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3-x]), color=cmap(Q[s][a]))) plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]),", "= env.desc.shape up = np.array([[0, 1], [0.5, 0.5], [1,1]]) down", "max_action_state(state, Q): action = np.argmax(Q[state,:]) return Q[state, action] def sarsa(env,", "hasattr(env, 'desc'): env = env.env dims = env.desc.shape V =", "Q function\"\"\" moves = [u'←', u'↓',u'→', u'↑'] if not hasattr(env,", "plot the state values from the Q function\"\"\" fig =", "verticalalignment='center') continue for a in range(len(tri)): ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3-x]),", "else: action = np.argmax(Q[state,:]) return action def max_action_state(state, Q): action", "if env.desc[idx] in ['H', 'G']: ax.add_patch(patches.Rectangle((y, 3-x), 1, 1, color=cmap(.0)))", "to print a nice policy from the Q function\"\"\" moves", "idx if env.desc[idx] in ['H', 'G']: ax.add_patch(patches.Rectangle((y, 3-x), 1, 1,", "Q = np.random.rand(env.observation_space.n, env.action_space.n) # TODO: implement the sarsa algorithm", "moves = [u'←', u'↓',u'→', u'↑'] if not hasattr(env, 'desc'): env", "dims = env.desc.shape policy = np.chararray(dims, unicode=True) policy[:] = '", "return action def max_action_state(state, Q): action = np.argmax(Q[state,:]) return Q[state,", "\"\"\" This is a helper function to plot the state", "algorithm for i in range(num_ep): s = env.reset() done =", "numpy as np from itertools import product import matplotlib.pyplot as", "= env.env dims = env.desc.shape V = np.zeros(dims) for s", "Q def qlearning(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)): #Q = np.zeros((env.observation_space.n,", "ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3-x]), color=cmap(Q[s][a]))) plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]), horizontalalignment='center', verticalalignment='center',", "= np.array([[0, 1], [0.5, 0.5], [1,1]]) down = np.array([[0, 0],", "#env=gym.make('FrozenLake-v0', is_slippery=False) #env=gym.make('FrozenLake-v0', map_name=\"8x8\") print(\"Running sarsa...\") Q = sarsa(env) plot_V(Q,", "= np.unravel_index(s, dims) x, y = idx if env.desc[idx] in", "the environment: for i in range(num_ep): s = env.reset() done", "a_ return Q def qlearning(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)): #Q", "ax.add_patch(patches.Rectangle((y, 3-x), 1, 1, color=cmap(.0))) plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0), horizontalalignment='center', verticalalignment='center')", "This is some starting point performing random walks in the", "= Q[s, a] + alpha * (r + (gamma *", "if env.desc[idx] in ['H', 'G']: V[idx] = 0. 
plt.imshow(V, origin='upper',", "import gym import numpy as np from itertools import product", "sarsa Q[s, a] = Q[s, a] + alpha * (r", "= False while not done: a = choose_abs_greedy_action(s, Q, epsilon)", "dims) V[idx] = np.max(Q[s]) if env.desc[idx] in ['H', 'G']: V[idx]", "origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=cmap) ax.grid(which='major', color='black', linestyle='-', linewidth=2) for", "function \"\"\" from matplotlib import colors, patches fig = plt.figure()", "in ['H', 'G']: V[idx] = 0. plt.imshow(V, origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0,", "product import matplotlib.pyplot as plt def print_policy(Q, env): \"\"\" This", "= u'·' print('\\n'.join([''.join([u'{:2}'.format(item) for item in row]) for row in", "patches fig = plt.figure() ax = fig.gca() if not hasattr(env,", "action def max_action_state(state, Q): action = np.argmax(Q[state,:]) return Q[state, action]", "in the environment: for i in range(num_ep): s = env.reset()", "env.env dims = env.desc.shape up = np.array([[0, 1], [0.5, 0.5],", "def plot_Q(Q, env): \"\"\" This is a helper function to", "print('\\n'.join([''.join([u'{:2}'.format(item) for item in row]) for row in policy])) def", "range(len(tri)): ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3-x]), color=cmap(Q[s][a]))) plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]), horizontalalignment='center',", "def max_action_state(state, Q): action = np.argmax(Q[state,:]) return Q[state, action] def", "else 'normal')) plt.xticks([]) plt.yticks([]) def choose_abs_greedy_action(state, Q, epsilon): action =", "verticalalignment='center') plt.xticks([]) plt.yticks([]) def plot_Q(Q, env): \"\"\" This is a", "# TODO: implement the qlearning algorithm for i in range(num_ep):", "env.desc[idx] in ['H', 'G']: policy[idx] = u'·' print('\\n'.join([''.join([u'{:2}'.format(item) for item", "from the Q function\"\"\" fig = plt.figure() if not hasattr(env,", "choose_abs_greedy_action(s, Q, epsilon) s_, r, done, _ = env.step(a) #update", "0.5], [1,1]]) tri = [left, down, right, up] pos =", "= choose_abs_greedy_action(s, Q, epsilon) s_, r, done, _ = env.step(a)", "(r + (gamma * Q[s_,a_]) - Q[s,a]) s = s_", "down = np.array([[0, 0], [0.5, 0.5], [1,0]]) left = np.array([[0,", "= 0. 
plt.imshow(V, origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=plt.cm.RdYlGn, interpolation='none') for", "range(len(Q)): idx = np.unravel_index(s, dims) policy[idx] = moves[np.argmax(Q[s])] if env.desc[idx]", "algorithm # This is some starting point performing random walks", "\"\"\" from matplotlib import colors, patches fig = plt.figure() ax", "origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=plt.cm.RdYlGn, interpolation='none') for x, y in", "env.env dims = env.desc.shape V = np.zeros(dims) for s in", "= s_ a = a_ return Q def qlearning(env, alpha=0.1,", "= [[0.2, 0.5], [0.5, 0.2], [0.8, 0.5], [0.5, 0.8]] cmap", "'G']: policy[idx] = u'·' print('\\n'.join([''.join([u'{:2}'.format(item) for item in row]) for", "alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)): #Q = np.zeros((env.observation_space.n, env.action_space.n)) Q =", "np.zeros((env.observation_space.n, env.action_space.n)) Q = np.random.rand(env.observation_space.n, env.action_space.n) # TODO: implement the", "= a_ return Q def qlearning(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):", "performing random walks in the environment: for i in range(num_ep):", "np.argmax(Q[state,:]) return Q[state, action] def sarsa(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):", "while not done: s_, r, done, _ = env.step(a) a_", "Q[s, a] + alpha * (r+ ( gamma * max_action_state(s_,", "range(num_ep): s = env.reset() done = False a = choose_abs_greedy_action(s,", "This is a helper function to plot the Q function", "Q, epsilon): action = None if np.random.uniform(0, 1) < epsilon:", "( gamma * max_action_state(s_, Q)) - Q[s,a] ) s =", "gamma * max_action_state(s_, Q)) - Q[s,a] ) s = s_", "'{:.2f}'.format(Q[s][a]), horizontalalignment='center', verticalalignment='center', fontsize=9, fontweight=('bold' if Q[s][a] == np.max(Q[s]) else", "env.desc.shape V = np.zeros(dims) for s in range(len(Q)): idx =", "plot_Q(Q, env): \"\"\" This is a helper function to plot", "env) plot_Q(Q, env) print_policy(Q, env) plt.show() print(\"Running qlearning\") Q =", "for s in range(len(Q)): idx = np.unravel_index(s, dims) V[idx] =", "= env.step(a) #update Q using Q learning Q[s, a] =", "u'↑'] if not hasattr(env, 'desc'): env = env.env dims =", "#Q = np.zeros((env.observation_space.n, env.action_space.n)) Q = np.random.rand(env.observation_space.n, env.action_space.n) # TODO:", "env.action_space.n) # TODO: implement the sarsa algorithm # This is", "plt.xticks([]) plt.yticks([]) def choose_abs_greedy_action(state, Q, epsilon): action = None if", "(gamma * Q[s_,a_]) - Q[s,a]) s = s_ a =", "np from itertools import product import matplotlib.pyplot as plt def", "plt.xticks([]) plt.yticks([]) def plot_Q(Q, env): \"\"\" This is a helper", "y in product(range(dims[0]), range(dims[1])): plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]), horizontalalignment='center', verticalalignment='center') plt.xticks([])", "env.step(a) a_ = choose_abs_greedy_action(s_, Q, epsilon) #update Q using sarsa", "the state values from the Q function\"\"\" fig = plt.figure()", "env.action_space.n) # TODO: implement the qlearning algorithm for i in", "plot the Q function \"\"\" from matplotlib import colors, patches", "'G']: V[idx] = 0. 
plt.imshow(V, origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=plt.cm.RdYlGn,", "= np.array([[0, 0], [0.5, 0.5], [0,1]]) right = np.array([[1, 0],", "a] + alpha * (r+ ( gamma * max_action_state(s_, Q))", "the Q function\"\"\" moves = [u'←', u'↓',u'→', u'↑'] if not", "env.action_space.n)) Q = np.random.rand(env.observation_space.n, env.action_space.n) # TODO: implement the qlearning", "= ' ' for s in range(len(Q)): idx = np.unravel_index(s,", "Q[s, a] = Q[s, a] + alpha * (r +", "plt def print_policy(Q, env): \"\"\" This is a helper function", "implement the sarsa algorithm # This is some starting point", "fontsize=9, fontweight=('bold' if Q[s][a] == np.max(Q[s]) else 'normal')) plt.xticks([]) plt.yticks([])", "[1,1]]) tri = [left, down, right, up] pos = [[0.2,", "Q function\"\"\" fig = plt.figure() if not hasattr(env, 'desc'): env", "done = False while not done: a = choose_abs_greedy_action(s, Q,", "item in row]) for row in policy])) def plot_V(Q, env):", "= np.chararray(dims, unicode=True) policy[:] = ' ' for s in", "from the Q function\"\"\" moves = [u'←', u'↓',u'→', u'↑'] if", "= np.max(Q[s]) if env.desc[idx] in ['H', 'G']: V[idx] = 0.", "up = np.array([[0, 1], [0.5, 0.5], [1,1]]) down = np.array([[0,", "Q): action = np.argmax(Q[state,:]) return Q[state, action] def sarsa(env, alpha=0.1,", "in range(num_ep): s = env.reset() done = False while not", "s = s_ a = a_ return Q def qlearning(env,", "y = idx if env.desc[idx] in ['H', 'G']: ax.add_patch(patches.Rectangle((y, 3-x),", "False a = choose_abs_greedy_action(s, Q, epsilon) while not done: s_,", "= Q[s, a] + alpha * (r+ ( gamma *", "cmap=plt.cm.RdYlGn, interpolation='none') for x, y in product(range(dims[0]), range(dims[1])): plt.text(y+0.5, dims[0]-x-0.5,", "plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0), horizontalalignment='center', verticalalignment='center') continue for a in range(len(tri)):", "np.array([[0, 0], [0.5, 0.5], [0,1]]) right = np.array([[1, 0], [0.5,", "'{:.2f}'.format(.0), horizontalalignment='center', verticalalignment='center') continue for a in range(len(tri)): ax.add_patch(patches.Polygon(tri[a] +", "np.max(Q[s]) if env.desc[idx] in ['H', 'G']: V[idx] = 0. 
plt.imshow(V,", "= plt.figure() ax = fig.gca() if not hasattr(env, 'desc'): env", "epsilon: action = np.random.randint(env.action_space.n) else: action = np.argmax(Q[state,:]) return action", "= env.reset() done = False a = choose_abs_greedy_action(s, Q, epsilon)", "fig.gca() if not hasattr(env, 'desc'): env = env.env dims =", "tri = [left, down, right, up] pos = [[0.2, 0.5],", "range(len(Q)): idx = np.unravel_index(s, dims) V[idx] = np.max(Q[s]) if env.desc[idx]", "env = env.env dims = env.desc.shape up = np.array([[0, 1],", "env.desc[idx] in ['H', 'G']: ax.add_patch(patches.Rectangle((y, 3-x), 1, 1, color=cmap(.0))) plt.text(y+0.5,", "1) < epsilon: action = np.random.randint(env.action_space.n) else: action = np.argmax(Q[state,:])", "Q[s,a]) s = s_ a = a_ return Q def", "TODO: implement the qlearning algorithm for i in range(num_ep): s", "None if np.random.uniform(0, 1) < epsilon: action = np.random.randint(env.action_space.n) else:", "epsilon) while not done: s_, r, done, _ = env.step(a)", "dims[0]-x-0.5, '{:.2f}'.format(.0), horizontalalignment='center', verticalalignment='center') continue for a in range(len(tri)): ax.add_patch(patches.Polygon(tri[a]", "return Q env=gym.make('FrozenLake-v0') #env=gym.make('FrozenLake-v0', is_slippery=False) #env=gym.make('FrozenLake-v0', map_name=\"8x8\") print(\"Running sarsa...\") Q", "function to plot the state values from the Q function\"\"\"", "extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=plt.cm.RdYlGn, interpolation='none') for x, y in product(range(dims[0]),", "a helper function to plot the Q function \"\"\" from", "V = np.zeros(dims) for s in range(len(Q)): idx = np.unravel_index(s,", "['H', 'G']: V[idx] = 0. plt.imshow(V, origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6,", "done, _ = env.step(a) #update Q using Q learning Q[s,", "i in range(num_ep): s = env.reset() done = False while", "0.5], [0.5, 0.2], [0.8, 0.5], [0.5, 0.8]] cmap = plt.cm.RdYlGn", "np.unravel_index(s, dims) x, y = idx if env.desc[idx] in ['H',", "plt.figure() ax = fig.gca() if not hasattr(env, 'desc'): env =", "env.action_space.n)) Q = np.random.rand(env.observation_space.n, env.action_space.n) # TODO: implement the sarsa", "env.desc[idx] in ['H', 'G']: V[idx] = 0. plt.imshow(V, origin='upper', extent=[0,dims[0],0,dims[1]]," ]
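The long row above holds shingles of a FrozenLake exercise script: epsilon-greedy action selection plus the tabular SARSA and Q-learning updates, which differ only in whether the TD target bootstraps on the action actually taken next (on-policy) or on the greedy action in the next state (off-policy). A minimal sketch of those two update rules follows, assuming a plain NumPy Q-table so it runs without gym; the helper names are illustrative.

import numpy as np

def epsilon_greedy(Q, s, epsilon, rng):
    # explore with probability epsilon, otherwise act greedily w.r.t. Q
    if rng.random() < epsilon:
        return int(rng.integers(Q.shape[1]))
    return int(np.argmax(Q[s]))

def sarsa_update(Q, s, a, r, s_next, a_next, alpha=0.1, gamma=0.9):
    # on-policy target: bootstrap on the action the agent will actually take
    Q[s, a] += alpha * (r + gamma * Q[s_next, a_next] - Q[s, a])

def q_learning_update(Q, s, a, r, s_next, alpha=0.1, gamma=0.9):
    # off-policy target: bootstrap on the greedy action in the next state
    Q[s, a] += alpha * (r + gamma * np.max(Q[s_next]) - Q[s, a])

# Toy usage on a FrozenLake-sized table (16 states, 4 actions), no environment needed.
rng = np.random.default_rng(0)
Q = np.zeros((16, 4))
s = 0
a = epsilon_greedy(Q, s, 0.1, rng)
q_learning_update(Q, s, a, r=0.0, s_next=4)

The ordering visible in the fragments follows from this difference: SARSA must pick a_ = choose_abs_greedy_action(s_, ...) before it can update, while Q-learning updates from (s, a, r, s_next) alone and only then chooses the next action.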
[ "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "by Django 3.0.4 on 2020-07-14 11:00 from django.db import migrations,", "Django 3.0.4 on 2020-07-14 11:00 from django.db import migrations, models", "migrations.CreateModel( name=\"AiLabCaseStudy\", fields=[ ( \"articlepage_ptr\", models.OneToOneField( auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True,", "primary_key=True, serialize=False, to=\"core.ArticlePage\", ), ), ( \"use_case\", models.ForeignKey( on_delete=django.db.models.deletion.PROTECT, to=\"ai_lab.AiLabUseCase\",", "fields=[ ( \"articlepage_ptr\", models.OneToOneField( auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=\"core.ArticlePage\",", "( \"articlepage_ptr\", models.OneToOneField( auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=\"core.ArticlePage\", ),", "\"articlepage_ptr\", models.OneToOneField( auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=\"core.ArticlePage\", ), ),", "serialize=False, to=\"core.ArticlePage\", ), ), ( \"use_case\", models.ForeignKey( on_delete=django.db.models.deletion.PROTECT, to=\"ai_lab.AiLabUseCase\", ),", "), ( \"use_case\", models.ForeignKey( on_delete=django.db.models.deletion.PROTECT, to=\"ai_lab.AiLabUseCase\", ), ), ], options={\"abstract\":", "[ (\"core\", \"0026_auto_20200713_1535\"), (\"ai_lab\", \"0002_ailabusecase\"), ] operations = [ migrations.CreateModel(", "models.ForeignKey( on_delete=django.db.models.deletion.PROTECT, to=\"ai_lab.AiLabUseCase\", ), ), ], options={\"abstract\": False,}, bases=(\"core.articlepage\", models.Model),", "on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=\"core.ArticlePage\", ), ), ( \"use_case\", models.ForeignKey(", "name=\"AiLabCaseStudy\", fields=[ ( \"articlepage_ptr\", models.OneToOneField( auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False,", "), ), ( \"use_case\", models.ForeignKey( on_delete=django.db.models.deletion.PROTECT, to=\"ai_lab.AiLabUseCase\", ), ), ],", "dependencies = [ (\"core\", \"0026_auto_20200713_1535\"), (\"ai_lab\", \"0002_ailabusecase\"), ] operations =", "\"use_case\", models.ForeignKey( on_delete=django.db.models.deletion.PROTECT, to=\"ai_lab.AiLabUseCase\", ), ), ], options={\"abstract\": False,}, bases=(\"core.articlepage\",", "2020-07-14 11:00 from django.db import migrations, models import django.db.models.deletion class", "models.OneToOneField( auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=\"core.ArticlePage\", ), ), (", "operations = [ migrations.CreateModel( name=\"AiLabCaseStudy\", fields=[ ( \"articlepage_ptr\", models.OneToOneField( auto_created=True,", "\"0002_ailabusecase\"), ] operations = [ migrations.CreateModel( name=\"AiLabCaseStudy\", fields=[ ( \"articlepage_ptr\",", "class Migration(migrations.Migration): dependencies = [ (\"core\", \"0026_auto_20200713_1535\"), (\"ai_lab\", \"0002_ailabusecase\"), ]", "( \"use_case\", models.ForeignKey( on_delete=django.db.models.deletion.PROTECT, to=\"ai_lab.AiLabUseCase\", ), ), ], options={\"abstract\": False,},", "to=\"core.ArticlePage\", ), ), ( \"use_case\", models.ForeignKey( 
on_delete=django.db.models.deletion.PROTECT, to=\"ai_lab.AiLabUseCase\", ), ),", "on_delete=django.db.models.deletion.PROTECT, to=\"ai_lab.AiLabUseCase\", ), ), ], options={\"abstract\": False,}, bases=(\"core.articlepage\", models.Model), ),", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "to=\"ai_lab.AiLabUseCase\", ), ), ], options={\"abstract\": False,}, bases=(\"core.articlepage\", models.Model), ), ]", "11:00 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "] operations = [ migrations.CreateModel( name=\"AiLabCaseStudy\", fields=[ ( \"articlepage_ptr\", models.OneToOneField(", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ (\"core\", \"0026_auto_20200713_1535\"), (\"ai_lab\", \"0002_ailabusecase\"),", "\"0026_auto_20200713_1535\"), (\"ai_lab\", \"0002_ailabusecase\"), ] operations = [ migrations.CreateModel( name=\"AiLabCaseStudy\", fields=[", "= [ (\"core\", \"0026_auto_20200713_1535\"), (\"ai_lab\", \"0002_ailabusecase\"), ] operations = [", "Migration(migrations.Migration): dependencies = [ (\"core\", \"0026_auto_20200713_1535\"), (\"ai_lab\", \"0002_ailabusecase\"), ] operations", "# Generated by Django 3.0.4 on 2020-07-14 11:00 from django.db", "= [ migrations.CreateModel( name=\"AiLabCaseStudy\", fields=[ ( \"articlepage_ptr\", models.OneToOneField( auto_created=True, on_delete=django.db.models.deletion.CASCADE,", "(\"ai_lab\", \"0002_ailabusecase\"), ] operations = [ migrations.CreateModel( name=\"AiLabCaseStudy\", fields=[ (", "on 2020-07-14 11:00 from django.db import migrations, models import django.db.models.deletion", "parent_link=True, primary_key=True, serialize=False, to=\"core.ArticlePage\", ), ), ( \"use_case\", models.ForeignKey( on_delete=django.db.models.deletion.PROTECT,", "import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ (\"core\", \"0026_auto_20200713_1535\"), (\"ai_lab\",", "auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=\"core.ArticlePage\", ), ), ( \"use_case\",", "[ migrations.CreateModel( name=\"AiLabCaseStudy\", fields=[ ( \"articlepage_ptr\", models.OneToOneField( auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True,", "Generated by Django 3.0.4 on 2020-07-14 11:00 from django.db import", "3.0.4 on 2020-07-14 11:00 from django.db import migrations, models import", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ (\"core\", \"0026_auto_20200713_1535\"),", "(\"core\", \"0026_auto_20200713_1535\"), (\"ai_lab\", \"0002_ailabusecase\"), ] operations = [ migrations.CreateModel( name=\"AiLabCaseStudy\",", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ (\"core\"," ]
<filename>requests/UpdateWorkbookConnectionRequest.py<gh_stars>1-10
from .BaseRequest import BaseRequest


class UpdateWorkbookConnectionRequest(BaseRequest):
    """
    Update workbook connection request for sending API requests to Tableau Server.

    :param ts_connection:       The Tableau Server connection object.
    :type ts_connection:        class
    :param server_address:      The new server for the connection.
    :type server_address:       string
    :param port:                The new port for the connection.
    :type port:                 string
    :param connection_username: The new username for the connection.
    :type connection_username:  string
    :param connection_password: The new password for the connection.
    :type connection_password:  string
    :param embed_password_flag: Boolean; True to embed the password in the connection, False otherwise.
    :type embed_password_flag:  boolean
    """
    def __init__(self,
                 ts_connection,
                 server_address=None,
                 port=None,
                 connection_username=None,
                 connection_password=<PASSWORD>,
                 embed_password_flag=None):
        super().__init__(ts_connection)
        self._server_address = server_address
        self._port = port
        self._connection_username = connection_username
        self._connection_password = <PASSWORD>
        self._embed_password_flag = embed_password_flag
        self.base_update_workbook_connection_request

    @property
    def optional_parameter_keys(self):
        return [
            'serverAddress',
            'serverPort',
            'userName',
            'password',
            'embedPassword'
        ]

    @property
    def optional_parameter_values_exist(self):
        return [
            self._server_address,
            self._port,
            self._connection_username,
            self._connection_password,
            True if self._embed_password_flag is not None else None
        ]

    @property
    def optional_parameter_values(self):
        return [
            self._server_address,
            self._port,
            self._connection_username,
            self._connection_password,
            self._embed_password_flag
        ]

    @property
    def base_update_workbook_connection_request(self):
        self._request_body.update({'connection': {}})
        return self._request_body

    @property
    def modified_update_workbook_connection_request(self):
        if any(self.optional_parameter_values_exist):
            self._request_body['connection'].update(
                self._get_parameters_dict(self.optional_parameter_keys,
                                          self.optional_parameter_values))
        return self._request_body

    @staticmethod
    def _get_parameters_dict(param_keys, param_values):
        """Override the inherited _get_parameters_dict() method to allow passing boolean values directly"""
        params_dict = {}
        for i, key in enumerate(param_keys):
            if param_values[i] is not None:
                params_dict.update({key: param_values[i]})
        return params_dict

    def get_request(self):
        return self.modified_update_workbook_connection_request
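A hypothetical usage sketch of the class above (not from the source): `ts_connection` stands in for whatever pre-authenticated Tableau Server connection object the surrounding library expects, the parameter values are invented, and the redacted <PASSWORD> default in the constructor would need a concrete default before the class can actually run.

# --- hypothetical usage sketch ---
request = UpdateWorkbookConnectionRequest(
    ts_connection,                      # assumed connection object
    server_address='db.example.com',
    port='5432',
    connection_username='analytics',
    connection_password='s3cret',
    embed_password_flag=True)

body = request.get_request()
# With all optional parameters supplied, the request body is expected to resemble:
# {'connection': {'serverAddress': 'db.example.com', 'serverPort': '5432',
#                 'userName': 'analytics', 'password': 's3cret',
#                 'embedPassword': True}}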
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# License: MIT. See LICENSE
import unittest

import frappe
from frappe.utils import set_request
from frappe.website.serve import get_response

test_dependencies = ["Blog Post"]


class TestWebsiteRouteMeta(unittest.TestCase):
    def test_meta_tag_generation(self):
        blogs = frappe.get_all(
            "Blog Post", fields=["name", "route"], filters={"published": 1, "route": ("!=", "")}, limit=1
        )

        blog = blogs[0]

        # create meta tags for this route
        doc = frappe.new_doc("Website Route Meta")
        doc.append("meta_tags", {"key": "type", "value": "blog_post"})
        doc.append("meta_tags", {"key": "og:title", "value": "My Blog"})
        doc.name = blog.route
        doc.insert()

        # set request on this route
        set_request(path=blog.route)
        response = get_response()

        self.assertTrue(response.status_code, 200)

        html = response.get_data().decode()

        self.assertTrue("""<meta name="type" content="blog_post">""" in html)
        self.assertTrue("""<meta property="og:title" content="My Blog">""" in html)

    def tearDown(self):
        frappe.db.rollback()
<reponame>rajadain/gwlf-e<filename>test/unittests/test_AgRunoff.py
import numpy as np

from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Runoff import AgRunoff


class TestAgRunoff(VariableUnitTest):
    # @skip("not ready")
    def test_AgRunoff(self):
        z = self.z
        np.testing.assert_array_almost_equal(
            AgRunoff.AgRunoff_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur,
                                z.CN, z.AntMoist_0, z.NUrb, z.Grow_0, z.Landuse, z.Area),
            AgRunoff.AgRunoff(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur,
                              z.CN, z.AntMoist_0, z.NUrb, z.Grow_0, z.Landuse, z.Area),
            decimal=7)
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for car_layers."""

from lingvo import compat as tf
from lingvo.core import py_utils
from lingvo.core import test_utils
from lingvo.tasks.car import car_layers


class CarLayersTest(test_utils.TestCase):

  def _testNestedOutShape(self, p, input_shape, expected_shape):
    batch_size, num_points, _ = input_shape
    g = tf.Graph()
    with g.as_default():
      net = p.Instantiate()
      input_data = py_utils.NestedMap(
          points=tf.random.uniform(input_shape[:-1] + (3,)),
          features=tf.random.uniform(input_shape),
          padding=tf.zeros((batch_size, num_points), dtype=tf.float32),
          label=tf.random.uniform((batch_size,), minval=0, maxval=16,
                                  dtype=tf.int32))
      result = net.FPropDefaultTheta(input_data)
    with self.session(graph=g):
      self.evaluate(tf.global_variables_initializer())
      np_result = self.evaluate(result)
      grouped_points_result = np_result.grouped_points
      self.assertEqual(grouped_points_result.features.shape,
                       expected_shape.grouped_points.features)
      self.assertEqual(grouped_points_result.points.shape,
                       expected_shape.grouped_points.points)
      self.assertEqual(grouped_points_result.padding.shape,
                       expected_shape.grouped_points.padding)
      query_points_result = np_result.query_points
      self.assertEqual(query_points_result.points.shape,
                       expected_shape.query_points.points)
      self.assertEqual(query_points_result.padding.shape,
                       expected_shape.query_points.padding)

  def testSamplingAndGrouping(self):
    for num_points in [1024, 256]:
      for input_dims in [3, 6, 9]:
        for group_size in [32, 64]:
          p = car_layers.SamplingAndGroupingLayer.Params().Set(
              name='SampleGroupTest',
              num_samples=256,
              ball_radius=0.2,
              group_size=group_size,
              sample_neighbors_uniformly=True)
          grouped_points_shape = py_utils.NestedMap(
              features=(8, 256, group_size, input_dims),
              points=(8, 256, group_size, 3),
              padding=(8, 256, group_size))
          query_points_shape = py_utils.NestedMap(
              points=(8, 256, 3),
              padding=(8, 256))
          expected_shape = py_utils.NestedMap({
              'grouped_points': grouped_points_shape,
              'query_points': query_points_shape
          })
          self._testNestedOutShape(p, (8, num_points, input_dims),
                                   expected_shape)


if __name__ == '__main__':
  tf.test.main()
# You", "expected_shape.query_points.padding) def testSamplingAndGrouping(self): for num_points in [1024, 256]: for input_dims", "batch_size, num_points, _ = input_shape g = tf.Graph() with g.as_default():", "the Apache License, Version 2.0 (the \"License\"); # you may", "input_shape, expected_shape): batch_size, num_points, _ = input_shape g = tf.Graph()", "input_dims in [3, 6, 9]: for group_size in [32, 64]:", "g.as_default(): net = p.Instantiate() input_data = py_utils.NestedMap( points=tf.random.uniform(input_shape[:-1] + (3,)),", "car_layers class CarLayersTest(test_utils.TestCase): def _testNestedOutShape(self, p, input_shape, expected_shape): batch_size, num_points," ]
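The test above only asserts output shapes. Below is a minimal NumPy sketch of that shape contract, using random neighbor indices as stand-ins for lingvo's actual ball-query sampling; every name in it is illustrative rather than part of the library.

import numpy as np

def sample_and_group_shapes(points, features, num_samples=256, group_size=32):
    """Mimic the grouped_points/query_points shape contract asserted above.

    points:   (batch, num_points, 3) float array.
    features: (batch, num_points, input_dims) float array.
    """
    batch, num_points, _ = points.shape
    # Hypothetical sampling: random indices instead of farthest-point
    # sampling plus a real ball query within ball_radius.
    centers = np.random.randint(num_points, size=(batch, num_samples))
    neighbors = np.random.randint(num_points, size=(batch, num_samples, group_size))
    b = np.arange(batch)[:, None, None]
    grouped = {
        'features': features[b, neighbors],  # (batch, num_samples, group_size, input_dims)
        'points': points[b, neighbors],      # (batch, num_samples, group_size, 3)
        'padding': np.zeros((batch, num_samples, group_size)),
    }
    query = {
        'points': points[np.arange(batch)[:, None], centers],  # (batch, num_samples, 3)
        'padding': np.zeros((batch, num_samples)),
    }
    return grouped, query

grouped, query = sample_and_group_shapes(
    np.random.rand(8, 1024, 3), np.random.rand(8, 1024, 6))
assert grouped['features'].shape == (8, 256, 32, 6)
assert query['points'].shape == (8, 256, 3)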
[ "import oblate import numpy as np import pytest # TODO!", "<reponame>rodluger/starry<filename>starry/_core/ops/lib/include/oblate/tests/test_derivs.py import oblate import numpy as np import pytest #" ]
[ "subdirectories for subdir, dirs, files in os.walk(sniff_path): if subdir not", "sniff_path: relative or absolute path :return: void \"\"\" sniff_path =", "re import argparse from datetime import datetime def clear_path_string(s): \"\"\"", "not in dir_store: dir_store[subdir] = {} dir_store[subdir]['subdirs'] = dirs dir_store[subdir]['files']", "\"\"\" Simple function that removes chars that are not allowed", "The information that will be store for each of the", "each of the files - in this case last file", "data with open(dump_name + '.pkl', 'wb') as output: pickle.dump(dir_store, output,", "taken:\", dump_name) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Directory Sniffer')", "# Name of a file in which data will be", "data will be stored dump_name = clear_path_string(sniff_path) + '_' +", "that walks through all of the subdirectories for subdir, dirs,", "ITC-sniff-for-changes-in-directory # @Author: <NAME> # @Website: http://itcave.eu # @Email: <EMAIL>", "cleaned_path_string \"\"\" return (re.sub('[^a-zA-Z]+', '#', s)).lower() def sniff(sniff_path): \"\"\" Walks", "sniff_path = str(sniff_path).lower() # Variable in which information will be", "coding: utf-8 -*- # @Filename : take_snapshot.py # @Date :", "this case last file modification date # Important: it's cross-platform", "pickled data with open(dump_name + '.pkl', 'wb') as output: pickle.dump(dir_store,", "files dir_store[subdir]['file_details'] = {} for file in files: f_path =", "in os.walk(sniff_path): if subdir not in dir_store: dir_store[subdir] = {}", "\"\"\" return (re.sub('[^a-zA-Z]+', '#', s)).lower() def sniff(sniff_path): \"\"\" Walks the", "that removes chars that are not allowed in file names", "will be stored dir_store = {} # Recursive loop that", "for each of the files - in this case last", ":return: cleaned_path_string \"\"\" return (re.sub('[^a-zA-Z]+', '#', s)).lower() def sniff(sniff_path): \"\"\"", "- in this case last file modification date # Important:", "the path and stores information about directory content :param sniff_path:", "to the directory that you want to take a snapshot", "dir_store[subdir] = {} dir_store[subdir]['subdirs'] = dirs dir_store[subdir]['files'] = files dir_store[subdir]['file_details']", "<EMAIL> # @License: MIT # @Copyright (C) 2019 ITGO <NAME>", "# @Date : 2019-07-15-13-44 # @Project: ITC-sniff-for-changes-in-directory # @Author: <NAME>", "os.walk(sniff_path): if subdir not in dir_store: dir_store[subdir] = {} dir_store[subdir]['subdirs']", "<NAME> # Generic imports import os import pickle import re", "the directory that you want to take a snapshot of')", "# Save pickled data with open(dump_name + '.pkl', 'wb') as", "be stored dir_store = {} # Recursive loop that walks", "+ '_' + datetime.now().strftime('%Y%m%d%H%M%S') # Save pickled data with open(dump_name", "will be stored dump_name = clear_path_string(sniff_path) + '_' + datetime.now().strftime('%Y%m%d%H%M%S')", "in files: f_path = os.path.join(subdir, file) # The information that", "you want to take a snapshot of') args = parser.parse_args()", "# @License: MIT # @Copyright (C) 2019 ITGO <NAME> #", ":param s: path_string :return: cleaned_path_string \"\"\" return (re.sub('[^a-zA-Z]+', '#', s)).lower()", "MIT # @Copyright (C) 2019 ITGO <NAME> # Generic imports", "# Generic imports import os import pickle import re import", "# Variable in which information will be stored dir_store =", "file in which data will be stored dump_name = clear_path_string(sniff_path)", "not allowed in file names :param s: 
path_string :return: cleaned_path_string", "as output: pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL) print(\"Directory Snapshot taken:\", dump_name) if", "that will be store for each of the files -", "# @Website: http://itcave.eu # @Email: <EMAIL> # @License: MIT #", "dump_name = clear_path_string(sniff_path) + '_' + datetime.now().strftime('%Y%m%d%H%M%S') # Save pickled", "file) # The information that will be store for each", "cross-platform relevant! modified_date = os.path.getmtime(f_path) dir_store[subdir]['file_details'][file] = (modified_date,) # Name", "ITGO <NAME> # Generic imports import os import pickle import", "(modified_date,) # Name of a file in which data will", "= clear_path_string(sniff_path) + '_' + datetime.now().strftime('%Y%m%d%H%M%S') # Save pickled data", "s: path_string :return: cleaned_path_string \"\"\" return (re.sub('[^a-zA-Z]+', '#', s)).lower() def", "dump_name) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Directory Sniffer') parser.add_argument('path',", "def sniff(sniff_path): \"\"\" Walks the path and stores information about", "date # Important: it's cross-platform relevant! modified_date = os.path.getmtime(f_path) dir_store[subdir]['file_details'][file]", "last file modification date # Important: it's cross-platform relevant! modified_date", "# @Project: ITC-sniff-for-changes-in-directory # @Author: <NAME> # @Website: http://itcave.eu #", "# Recursive loop that walks through all of the subdirectories", "Sniffer') parser.add_argument('path', help='Path to the directory that you want to", "of the subdirectories for subdir, dirs, files in os.walk(sniff_path): if", "dir_store = {} # Recursive loop that walks through all", "that you want to take a snapshot of') args =", "utf-8 -*- # @Filename : take_snapshot.py # @Date : 2019-07-15-13-44", "\"\"\" Walks the path and stores information about directory content", "file modification date # Important: it's cross-platform relevant! 
modified_date =", "2019 ITGO <NAME> # Generic imports import os import pickle", ": 2019-07-15-13-44 # @Project: ITC-sniff-for-changes-in-directory # @Author: <NAME> # @Website:", "walks through all of the subdirectories for subdir, dirs, files", ":return: void \"\"\" sniff_path = str(sniff_path).lower() # Variable in which", "= os.path.getmtime(f_path) dir_store[subdir]['file_details'][file] = (modified_date,) # Name of a file", "a file in which data will be stored dump_name =", "names :param s: path_string :return: cleaned_path_string \"\"\" return (re.sub('[^a-zA-Z]+', '#',", "argparse from datetime import datetime def clear_path_string(s): \"\"\" Simple function", "the subdirectories for subdir, dirs, files in os.walk(sniff_path): if subdir", "dir_store[subdir]['files'] = files dir_store[subdir]['file_details'] = {} for file in files:", "in file names :param s: path_string :return: cleaned_path_string \"\"\" return", "if subdir not in dir_store: dir_store[subdir] = {} dir_store[subdir]['subdirs'] =", "(re.sub('[^a-zA-Z]+', '#', s)).lower() def sniff(sniff_path): \"\"\" Walks the path and", "about directory content :param sniff_path: relative or absolute path :return:", "f_path = os.path.join(subdir, file) # The information that will be", "files in os.walk(sniff_path): if subdir not in dir_store: dir_store[subdir] =", "dir_store[subdir]['file_details'][file] = (modified_date,) # Name of a file in which", "parser = argparse.ArgumentParser(description='Directory Sniffer') parser.add_argument('path', help='Path to the directory that", "{} for file in files: f_path = os.path.join(subdir, file) #", "are not allowed in file names :param s: path_string :return:", "dirs dir_store[subdir]['files'] = files dir_store[subdir]['file_details'] = {} for file in", "void \"\"\" sniff_path = str(sniff_path).lower() # Variable in which information", "str(sniff_path).lower() # Variable in which information will be stored dir_store", "print(\"Directory Snapshot taken:\", dump_name) if __name__ == '__main__': parser =", "want to take a snapshot of') args = parser.parse_args() sniff(args.path)", "for subdir, dirs, files in os.walk(sniff_path): if subdir not in", "the files - in this case last file modification date", "case last file modification date # Important: it's cross-platform relevant!", "-*- # @Filename : take_snapshot.py # @Date : 2019-07-15-13-44 #", "Save pickled data with open(dump_name + '.pkl', 'wb') as output:", "for file in files: f_path = os.path.join(subdir, file) # The", "datetime.now().strftime('%Y%m%d%H%M%S') # Save pickled data with open(dump_name + '.pkl', 'wb')", "Walks the path and stores information about directory content :param", "output, pickle.HIGHEST_PROTOCOL) print(\"Directory Snapshot taken:\", dump_name) if __name__ == '__main__':", "@Filename : take_snapshot.py # @Date : 2019-07-15-13-44 # @Project: ITC-sniff-for-changes-in-directory", "Simple function that removes chars that are not allowed in", "pickle import re import argparse from datetime import datetime def", "= (modified_date,) # Name of a file in which data", "# Important: it's cross-platform relevant! 
modified_date = os.path.getmtime(f_path) dir_store[subdir]['file_details'][file] =", "argparse.ArgumentParser(description='Directory Sniffer') parser.add_argument('path', help='Path to the directory that you want", "import datetime def clear_path_string(s): \"\"\" Simple function that removes chars", "@Copyright (C) 2019 ITGO <NAME> # Generic imports import os", "@Email: <EMAIL> # @License: MIT # @Copyright (C) 2019 ITGO", "# @Filename : take_snapshot.py # @Date : 2019-07-15-13-44 # @Project:", "loop that walks through all of the subdirectories for subdir,", "output: pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL) print(\"Directory Snapshot taken:\", dump_name) if __name__", "or absolute path :return: void \"\"\" sniff_path = str(sniff_path).lower() #", "import pickle import re import argparse from datetime import datetime", "__name__ == '__main__': parser = argparse.ArgumentParser(description='Directory Sniffer') parser.add_argument('path', help='Path to", "os import pickle import re import argparse from datetime import", "= files dir_store[subdir]['file_details'] = {} for file in files: f_path", "relevant! modified_date = os.path.getmtime(f_path) dir_store[subdir]['file_details'][file] = (modified_date,) # Name of", "# -*- coding: utf-8 -*- # @Filename : take_snapshot.py #", "chars that are not allowed in file names :param s:", "allowed in file names :param s: path_string :return: cleaned_path_string \"\"\"", "through all of the subdirectories for subdir, dirs, files in", "{} # Recursive loop that walks through all of the", "import re import argparse from datetime import datetime def clear_path_string(s):", "dir_store[subdir]['subdirs'] = dirs dir_store[subdir]['files'] = files dir_store[subdir]['file_details'] = {} for", "content :param sniff_path: relative or absolute path :return: void \"\"\"", "\"\"\" sniff_path = str(sniff_path).lower() # Variable in which information will", "stored dir_store = {} # Recursive loop that walks through", "all of the subdirectories for subdir, dirs, files in os.walk(sniff_path):", "clear_path_string(sniff_path) + '_' + datetime.now().strftime('%Y%m%d%H%M%S') # Save pickled data with", "pickle.HIGHEST_PROTOCOL) print(\"Directory Snapshot taken:\", dump_name) if __name__ == '__main__': parser", "function that removes chars that are not allowed in file", "that are not allowed in file names :param s: path_string", "in this case last file modification date # Important: it's", "if __name__ == '__main__': parser = argparse.ArgumentParser(description='Directory Sniffer') parser.add_argument('path', help='Path", "{} dir_store[subdir]['subdirs'] = dirs dir_store[subdir]['files'] = files dir_store[subdir]['file_details'] = {}", "clear_path_string(s): \"\"\" Simple function that removes chars that are not", "+ datetime.now().strftime('%Y%m%d%H%M%S') # Save pickled data with open(dump_name + '.pkl',", "import argparse from datetime import datetime def clear_path_string(s): \"\"\" Simple", "information about directory content :param sniff_path: relative or absolute path", "take_snapshot.py # @Date : 2019-07-15-13-44 # @Project: ITC-sniff-for-changes-in-directory # @Author:", "which information will be stored dir_store = {} # Recursive", "pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL) print(\"Directory Snapshot taken:\", dump_name) if __name__ ==", "== '__main__': parser = argparse.ArgumentParser(description='Directory Sniffer') parser.add_argument('path', help='Path to the", "'__main__': parser = argparse.ArgumentParser(description='Directory 
Sniffer') parser.add_argument('path', help='Path to the directory", "dir_store[subdir]['file_details'] = {} for file in files: f_path = os.path.join(subdir,", "datetime def clear_path_string(s): \"\"\" Simple function that removes chars that", "+ '.pkl', 'wb') as output: pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL) print(\"Directory Snapshot", "http://itcave.eu # @Email: <EMAIL> # @License: MIT # @Copyright (C)", "files: f_path = os.path.join(subdir, file) # The information that will", "sniff(sniff_path): \"\"\" Walks the path and stores information about directory", "# @Email: <EMAIL> # @License: MIT # @Copyright (C) 2019", "open(dump_name + '.pkl', 'wb') as output: pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL) print(\"Directory", "imports import os import pickle import re import argparse from", "information will be stored dir_store = {} # Recursive loop", "= {} dir_store[subdir]['subdirs'] = dirs dir_store[subdir]['files'] = files dir_store[subdir]['file_details'] =", "and stores information about directory content :param sniff_path: relative or", "subdir not in dir_store: dir_store[subdir] = {} dir_store[subdir]['subdirs'] = dirs", "with open(dump_name + '.pkl', 'wb') as output: pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL)", "@Date : 2019-07-15-13-44 # @Project: ITC-sniff-for-changes-in-directory # @Author: <NAME> #", "def clear_path_string(s): \"\"\" Simple function that removes chars that are", "Important: it's cross-platform relevant! modified_date = os.path.getmtime(f_path) dir_store[subdir]['file_details'][file] = (modified_date,)", "file in files: f_path = os.path.join(subdir, file) # The information", "@License: MIT # @Copyright (C) 2019 ITGO <NAME> # Generic", "store for each of the files - in this case", "in which data will be stored dump_name = clear_path_string(sniff_path) +", "subdir, dirs, files in os.walk(sniff_path): if subdir not in dir_store:", "= argparse.ArgumentParser(description='Directory Sniffer') parser.add_argument('path', help='Path to the directory that you", "= {} for file in files: f_path = os.path.join(subdir, file)", "os.path.join(subdir, file) # The information that will be store for", "will be store for each of the files - in", "import os import pickle import re import argparse from datetime", "help='Path to the directory that you want to take a", "in dir_store: dir_store[subdir] = {} dir_store[subdir]['subdirs'] = dirs dir_store[subdir]['files'] =", "= str(sniff_path).lower() # Variable in which information will be stored", "Generic imports import os import pickle import re import argparse", "modification date # Important: it's cross-platform relevant! 
modified_date = os.path.getmtime(f_path)", "files - in this case last file modification date #", "Name of a file in which data will be stored", "be stored dump_name = clear_path_string(sniff_path) + '_' + datetime.now().strftime('%Y%m%d%H%M%S') #", ": take_snapshot.py # @Date : 2019-07-15-13-44 # @Project: ITC-sniff-for-changes-in-directory #", "# The information that will be store for each of", "= os.path.join(subdir, file) # The information that will be store", "Snapshot taken:\", dump_name) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Directory", "dir_store: dir_store[subdir] = {} dir_store[subdir]['subdirs'] = dirs dir_store[subdir]['files'] = files", "@Author: <NAME> # @Website: http://itcave.eu # @Email: <EMAIL> # @License:", "parser.add_argument('path', help='Path to the directory that you want to take", "<NAME> # @Website: http://itcave.eu # @Email: <EMAIL> # @License: MIT", "absolute path :return: void \"\"\" sniff_path = str(sniff_path).lower() # Variable", "= {} # Recursive loop that walks through all of", "of the files - in this case last file modification", "it's cross-platform relevant! modified_date = os.path.getmtime(f_path) dir_store[subdir]['file_details'][file] = (modified_date,) #", "return (re.sub('[^a-zA-Z]+', '#', s)).lower() def sniff(sniff_path): \"\"\" Walks the path", "directory that you want to take a snapshot of') args", "= dirs dir_store[subdir]['files'] = files dir_store[subdir]['file_details'] = {} for file", "s)).lower() def sniff(sniff_path): \"\"\" Walks the path and stores information", "directory content :param sniff_path: relative or absolute path :return: void", "path_string :return: cleaned_path_string \"\"\" return (re.sub('[^a-zA-Z]+', '#', s)).lower() def sniff(sniff_path):", "modified_date = os.path.getmtime(f_path) dir_store[subdir]['file_details'][file] = (modified_date,) # Name of a", "relative or absolute path :return: void \"\"\" sniff_path = str(sniff_path).lower()", "# @Author: <NAME> # @Website: http://itcave.eu # @Email: <EMAIL> #", "be store for each of the files - in this", "removes chars that are not allowed in file names :param", "in which information will be stored dir_store = {} #", "'_' + datetime.now().strftime('%Y%m%d%H%M%S') # Save pickled data with open(dump_name +", "stored dump_name = clear_path_string(sniff_path) + '_' + datetime.now().strftime('%Y%m%d%H%M%S') # Save", "2019-07-15-13-44 # @Project: ITC-sniff-for-changes-in-directory # @Author: <NAME> # @Website: http://itcave.eu", "which data will be stored dump_name = clear_path_string(sniff_path) + '_'", "-*- coding: utf-8 -*- # @Filename : take_snapshot.py # @Date", "Variable in which information will be stored dir_store = {}", "datetime import datetime def clear_path_string(s): \"\"\" Simple function that removes", "'#', s)).lower() def sniff(sniff_path): \"\"\" Walks the path and stores", "(C) 2019 ITGO <NAME> # Generic imports import os import", "# @Copyright (C) 2019 ITGO <NAME> # Generic imports import", "from datetime import datetime def clear_path_string(s): \"\"\" Simple function that", "of a file in which data will be stored dump_name", ":param sniff_path: relative or absolute path :return: void \"\"\" sniff_path", "'.pkl', 'wb') as output: pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL) print(\"Directory Snapshot taken:\",", "'wb') as output: pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL) print(\"Directory Snapshot taken:\", dump_name)", "Recursive loop that walks through all of the subdirectories for", "@Project: 
ITC-sniff-for-changes-in-directory # @Author: <NAME> # @Website: http://itcave.eu # @Email:", "information that will be store for each of the files", "path and stores information about directory content :param sniff_path: relative", "os.path.getmtime(f_path) dir_store[subdir]['file_details'][file] = (modified_date,) # Name of a file in", "path :return: void \"\"\" sniff_path = str(sniff_path).lower() # Variable in", "@Website: http://itcave.eu # @Email: <EMAIL> # @License: MIT # @Copyright", "stores information about directory content :param sniff_path: relative or absolute", "file names :param s: path_string :return: cleaned_path_string \"\"\" return (re.sub('[^a-zA-Z]+',", "dirs, files in os.walk(sniff_path): if subdir not in dir_store: dir_store[subdir]" ]
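The script above only writes snapshots. Given the project name, the natural companion step is diffing two dumps; the sketch below is an assumed helper, not part of the original project, that loads two pickles produced by sniff() and reports added, modified, and removed files.

import pickle

def compare_snapshots(old_pkl, new_pkl):
    # Load two dir_store dictionaries written by sniff().
    with open(old_pkl, 'rb') as fh:
        old = pickle.load(fh)
    with open(new_pkl, 'rb') as fh:
        new = pickle.load(fh)
    for subdir, entry in new.items():
        old_details = old.get(subdir, {}).get('file_details', {})
        for name, details in entry['file_details'].items():
            if name not in old_details:
                print('added:   ', subdir, name)
            elif old_details[name] != details:
                print('modified:', subdir, name)
        for name in old_details:
            if name not in entry['file_details']:
                print('removed: ', subdir, name)

# Example usage, with file names following the dump_name pattern above:
# compare_snapshots('snapshot_a.pkl', 'snapshot_b.pkl')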
[ "kind = \"EXPRESSION_BUILTIN_DIR1\" def computeExpression(self, trace_collection): # TODO: Quite some", "from .VariableRefNodes import ExpressionTempVariableRef, ExpressionVariableRef class ExpressionBuiltinGlobals(ExpressionBase): kind = \"EXPRESSION_BUILTIN_GLOBALS\"", "= ExpressionMakeDict(pairs=_sorted(pairs), source_ref=self.source_ref) return result, \"new_expression\", \"Statically predicted locals dictionary.\"", "2.0 (the \"License\"); # you may not use this file", "kind = \"EXPRESSION_BUILTIN_LOCALS_UPDATED\" def __init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope,", "Any exception may be raised. trace_collection.onExceptionRaiseExit(BaseException) return self, None, None", "source_ref=source_ref ) def getLocalsScope(self): return self.locals_scope def computeExpressionRaw(self, trace_collection): if", "), value=ExpressionVariableRef( variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref, ) ) # Locals", "from .DictionaryNodes import ExpressionKeyValuePair, ExpressionMakeDict from .ExpressionBases import ExpressionBase, ExpressionBuiltinSingleArgBase", "= \"EXPRESSION_BUILTIN_GLOBALS\" def __init__(self, source_ref): ExpressionBase.__init__(self, source_ref=source_ref) def finalize(self): del", "Quite some cases should be possible to predict and this", "None class ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_COPY\" def computeExpressionRaw(self, trace_collection): #", "optimizing Python compiler that is compatible and # integrates with", "class ExpressionBuiltinLocalsBase(ExpressionBase): # Base classes can be abstract, pylint: disable=abstract-method", "where their value goes. The \"dir()\" call without arguments is", "new_result = result.computeExpressionRaw(trace_collection) assert new_result[0] is result self.finalize() return result,", "result = ExpressionMakeDict(pairs=_sorted(pairs), source_ref=self.source_ref) return result, \"new_expression\", \"Statically predicted locals", "None, None class ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_COPY\" def computeExpressionRaw(self, trace_collection):", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "pairs, key=lambda pair: names.index(pair.getKey().getCompileTimeConstant()), ) result = ExpressionMakeDict(pairs=_sorted(pairs), source_ref=self.source_ref) return", ") for variable, variable_trace in self.variable_traces: if ( not variable_trace.mustHaveValue()", "using them, the code may change or access anything about", "self.locals_scope def computeExpressionRaw(self, trace_collection): # Just inform the collection that", "value=ExpressionVariableRef( variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref, ) ) # Locals is", "to variables, highly problematic, because using them, the code may", "if not self.getParent().isStatementReturn(): trace_collection.onLocalsUsage(self.getParentVariableProvider()) return self, None, None class ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase):", "The \"dir()\" call without arguments is reformulated to locals or", "variable_trace in self.variable_traces: if variable_trace.mustHaveValue(): pairs.append( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable.getName(), user_provided=True,", "arg dir nodes These nodes give access to variables, highly", "all escaped. 
self.variable_traces = trace_collection.onLocalsUsage( self.getParentVariableProvider() ) trace_collection.onLocalsDictEscaped(self.locals_scope) return self,", "locals or globals calls. \"\"\" from .ConstantRefNodes import makeConstantRefNode from", "mayRaiseException(self, exception_type): return False def getVariableTraces(self): return self.variable_traces class ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase):", "this # should be using a slot, with \"__dir__\" being", "\"EXPRESSION_BUILTIN_GLOBALS\" def __init__(self, source_ref): ExpressionBase.__init__(self, source_ref=source_ref) def finalize(self): del self.parent", "locals_scope, source_ref): ExpressionBase.__init__(self, source_ref=source_ref) self.variable_traces = None self.locals_scope = locals_scope", "exception_type): return False def getVariableTraces(self): return self.variable_traces class ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase): kind", "# # Part of \"Nuitka\", an optimizing Python compiler that", "ExpressionBase, ExpressionBuiltinSingleArgBase from .VariableRefNodes import ExpressionTempVariableRef, ExpressionVariableRef class ExpressionBuiltinGlobals(ExpressionBase): kind", "__init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref ) def getLocalsScope(self):", "use this file except in compliance with the License. #", "# limitations under the License. # \"\"\" Globals/locals/single arg dir", "__init__(self, source_ref): ExpressionBase.__init__(self, source_ref=source_ref) def finalize(self): del self.parent def computeExpressionRaw(self,", "self.finalize() return result, \"new_expression\", \"Propagated locals dictionary reference.\" # Just", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "self.getParentVariableProvider() ) for variable, variable_trace in self.variable_traces: if ( not", "globals calls. \"\"\" from .ConstantRefNodes import makeConstantRefNode from .DictionaryNodes import", "return self, None, None pairs = [] for variable, variable_trace", "License. # You may obtain a copy of the License", "variable_trace.getNameUsageCount() > 1: return self, None, None pairs = []", "should be using a slot, with \"__dir__\" being overloaded or", "): return self, None, None # Other locals elsewhere. if", "in self.locals_scope.getPropagationVariables().items() ), source_ref=self.source_ref, ) new_result = result.computeExpressionRaw(trace_collection) assert new_result[0]", "# \"\"\" Globals/locals/single arg dir nodes These nodes give access", "These nodes give access to variables, highly problematic, because using", "kind = \"EXPRESSION_BUILTIN_LOCALS_COPY\" def computeExpressionRaw(self, trace_collection): # Just inform the", "names.index(pair.getKey().getCompileTimeConstant()), ) result = ExpressionMakeDict(pairs=_sorted(pairs), source_ref=self.source_ref) return result, \"new_expression\", \"Statically", "under the License is distributed on an \"AS IS\" BASIS,", "self.variable_traces: if variable_trace.mustHaveValue(): pairs.append( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable.getName(), user_provided=True, source_ref=self.source_ref, ),", "License for the specific language governing permissions and # limitations", "Any code could be run, note that. 
trace_collection.onControlFlowEscape(self) # Any", "can be abstract, pylint: disable=abstract-method __slots__ = (\"variable_traces\", \"locals_scope\") def", "class ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_REF\" def __init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__(", "result self.finalize() return result, \"new_expression\", \"Propagated locals dictionary reference.\" #", "self.variable_traces = trace_collection.onLocalsUsage( self.getParentVariableProvider() ) trace_collection.onLocalsDictEscaped(self.locals_scope) return self, None, None", "and # integrates with CPython, but also works on its", "about them, so nothing can be trusted anymore, if we", "under the License. # \"\"\" Globals/locals/single arg dir nodes These", "is compatible and # integrates with CPython, but also works", "if we start to not know where their value goes.", "cases should be possible to predict and this # should", "on its own. # # Licensed under the Apache License,", "finalize(self): del self.parent def computeExpressionRaw(self, trace_collection): return self, None, None", "start to not know where their value goes. The \"dir()\"", "source_ref=self.source_ref, ) new_result = result.computeExpressionRaw(trace_collection) assert new_result[0] is result self.finalize()", "source_ref=self.source_ref ), source_ref=self.source_ref, ) for variable_name, variable in self.locals_scope.getPropagationVariables().items() ),", "is not None def getLocalsScope(self): return self.locals_scope def computeExpressionRaw(self, trace_collection):", "exception_type): return False class ExpressionBuiltinLocalsBase(ExpressionBase): # Base classes can be", "class ExpressionBuiltinGlobals(ExpressionBase): kind = \"EXPRESSION_BUILTIN_GLOBALS\" def __init__(self, source_ref): ExpressionBase.__init__(self, source_ref=source_ref)", "# integrates with CPython, but also works on its own.", "in compliance with the License. # You may obtain a", "software # distributed under the License is distributed on an", "the collection that all escaped unless it is abortative. if", "ExpressionBuiltinSingleArgBase from .VariableRefNodes import ExpressionTempVariableRef, ExpressionVariableRef class ExpressionBuiltinGlobals(ExpressionBase): kind =", "Other locals elsewhere. if variable_trace.getNameUsageCount() > 1: return self, None,", "source_ref=self.source_ref ), value=ExpressionTempVariableRef( variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref, ) for variable_name,", "assert new_result[0] is result self.finalize() return result, \"new_expression\", \"Propagated locals", "assert locals_scope is not None def getLocalsScope(self): return self.locals_scope def", "self.variable_traces = trace_collection.onLocalsUsage( self.getParentVariableProvider() ) for variable, variable_trace in self.variable_traces:", "self, locals_scope=locals_scope, source_ref=source_ref ) def getLocalsScope(self): return self.locals_scope def computeExpressionRaw(self,", "# Other locals elsewhere. if variable_trace.getNameUsageCount() > 1: return self,", "ExpressionBuiltinGlobals(ExpressionBase): kind = \"EXPRESSION_BUILTIN_GLOBALS\" def __init__(self, source_ref): ExpressionBase.__init__(self, source_ref=source_ref) def", "return False def mayRaiseException(self, exception_type): return False class ExpressionBuiltinLocalsBase(ExpressionBase): #", "Copyright 2020, <NAME>, mailto:<EMAIL> # # Part of \"Nuitka\", an", "Just inform the collection that all escaped. 
self.variable_traces = trace_collection.onLocalsUsage(", "def computeExpressionRaw(self, trace_collection): # Just inform the collection that all", "( not variable_trace.mustHaveValue() and not variable_trace.mustNotHaveValue() ): return self, None,", "self, None, None pairs = [] for variable, variable_trace in", "is abortative. if not self.getParent().isStatementReturn(): trace_collection.onLocalsUsage(self.getParentVariableProvider()) return self, None, None", "ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable.getName(), user_provided=True, source_ref=self.source_ref, ), value=ExpressionVariableRef( variable=variable, source_ref=self.source_ref ),", "trace_collection): return self, None, None def mayHaveSideEffects(self): return False def", "problematic, because using them, the code may change or access", "that all escaped unless it is abortative. if not self.getParent().isStatementReturn():", "without arguments is reformulated to locals or globals calls. \"\"\"", "False def getVariableTraces(self): return self.variable_traces class ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_UPDATED\"", "sorted of course. def _sorted(pairs): names = self.getParentVariableProvider().getLocalVariableNames() return sorted(", "locals elsewhere. if variable_trace.getNameUsageCount() > 1: return self, None, None", "mailto:<EMAIL> # # Part of \"Nuitka\", an optimizing Python compiler", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "we start to not know where their value goes. The", ") result = ExpressionMakeDict(pairs=_sorted(pairs), source_ref=self.source_ref) return result, \"new_expression\", \"Statically predicted", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "arguments is reformulated to locals or globals calls. \"\"\" from", "variable_trace.mustNotHaveValue() ): return self, None, None # Other locals elsewhere.", "variable, variable_trace in self.variable_traces: if ( not variable_trace.mustHaveValue() and not", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "escaped unless it is abortative. if not self.getParent().isStatementReturn(): trace_collection.onLocalsUsage(self.getParentVariableProvider()) return", "variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref, ) ) # Locals is sorted", "pairs = [] for variable, variable_trace in self.variable_traces: if variable_trace.mustHaveValue():", "to in writing, software # distributed under the License is", "anymore, if we start to not know where their value", "call without arguments is reformulated to locals or globals calls.", "# See the License for the specific language governing permissions", "# Copyright 2020, <NAME>, mailto:<EMAIL> # # Part of \"Nuitka\",", "\"dir()\" call without arguments is reformulated to locals or globals", "self.locals_scope def computeExpressionRaw(self, trace_collection): if self.locals_scope.isMarkedForPropagation(): result = ExpressionMakeDict( pairs=(", "language governing permissions and # limitations under the License. 
#", "False def mayRaiseException(self, exception_type): return False def getVariableTraces(self): return self.variable_traces", "> 1: return self, None, None pairs = [] for", "or agreed to in writing, software # distributed under the", "trace_collection.onLocalsDictEscaped(self.locals_scope) return self, None, None class ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_REF\"", ") ) # Locals is sorted of course. def _sorted(pairs):", "and this # should be using a slot, with \"__dir__\"", "required by applicable law or agreed to in writing, software", "class ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_UPDATED\" def __init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__(", "\"Nuitka\", an optimizing Python compiler that is compatible and #", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "in self.variable_traces: if ( not variable_trace.mustHaveValue() and not variable_trace.mustNotHaveValue() ):", "variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref, ) for variable_name, variable in self.locals_scope.getPropagationVariables().items()", "with the License. # You may obtain a copy of", "disable=abstract-method __slots__ = (\"variable_traces\", \"locals_scope\") def __init__(self, locals_scope, source_ref): ExpressionBase.__init__(self,", "nodes These nodes give access to variables, highly problematic, because", "CPython, but also works on its own. # # Licensed", "source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref ) def getLocalsScope(self): return self.locals_scope", "\"\"\" Globals/locals/single arg dir nodes These nodes give access to", "should be possible to predict and this # should be", "return self.locals_scope def computeExpressionRaw(self, trace_collection): if self.locals_scope.isMarkedForPropagation(): result = ExpressionMakeDict(", "self, locals_scope=locals_scope, source_ref=source_ref ) assert locals_scope is not None def", "anything about them, so nothing can be trusted anymore, if", "compliance with the License. # You may obtain a copy", "\"\"\" from .ConstantRefNodes import makeConstantRefNode from .DictionaryNodes import ExpressionKeyValuePair, ExpressionMakeDict", "agreed to in writing, software # distributed under the License", "trace_collection): # TODO: Quite some cases should be possible to", "abstract, pylint: disable=abstract-method __slots__ = (\"variable_traces\", \"locals_scope\") def __init__(self, locals_scope,", "distributed under the License is distributed on an \"AS IS\"", "return self, None, None class ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_REF\" def", "), source_ref=self.source_ref, ) for variable_name, variable in self.locals_scope.getPropagationVariables().items() ), source_ref=self.source_ref,", "def mayHaveSideEffects(self): return False def mayRaiseException(self, exception_type): return False class", "__slots__ = (\"variable_traces\", \"locals_scope\") def __init__(self, locals_scope, source_ref): ExpressionBase.__init__(self, source_ref=source_ref)", "source_ref=source_ref ) assert locals_scope is not None def getLocalsScope(self): return", "None # Other locals elsewhere. if variable_trace.getNameUsageCount() > 1: return", "their value goes. 
The \"dir()\" call without arguments is reformulated", "getLocalsScope(self): return self.locals_scope def computeExpressionRaw(self, trace_collection): # Just inform the", "kind = \"EXPRESSION_BUILTIN_GLOBALS\" def __init__(self, source_ref): ExpressionBase.__init__(self, source_ref=source_ref) def finalize(self):", "self.variable_traces def mayHaveSideEffects(self): return False def mayRaiseException(self, exception_type): return False", "TODO: Quite some cases should be possible to predict and", "[] for variable, variable_trace in self.variable_traces: if variable_trace.mustHaveValue(): pairs.append( ExpressionKeyValuePair(", "them, the code may change or access anything about them,", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "key=makeConstantRefNode( constant=variable_name, source_ref=self.source_ref ), value=ExpressionTempVariableRef( variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref, )", "limitations under the License. # \"\"\" Globals/locals/single arg dir nodes", "import makeConstantRefNode from .DictionaryNodes import ExpressionKeyValuePair, ExpressionMakeDict from .ExpressionBases import", "variable_trace in self.variable_traces: if ( not variable_trace.mustHaveValue() and not variable_trace.mustNotHaveValue()", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "constant=variable.getName(), user_provided=True, source_ref=self.source_ref, ), value=ExpressionVariableRef( variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref, )", "locals_scope def finalize(self): del self.locals_scope del self.variable_traces def mayHaveSideEffects(self): return", "def _sorted(pairs): names = self.getParentVariableProvider().getLocalVariableNames() return sorted( pairs, key=lambda pair:", "sorted( pairs, key=lambda pair: names.index(pair.getKey().getCompileTimeConstant()), ) result = ExpressionMakeDict(pairs=_sorted(pairs), source_ref=self.source_ref)", "writing, software # distributed under the License is distributed on", "locals_scope=locals_scope, source_ref=source_ref ) def getLocalsScope(self): return self.locals_scope def computeExpressionRaw(self, trace_collection):", ".DictionaryNodes import ExpressionKeyValuePair, ExpressionMakeDict from .ExpressionBases import ExpressionBase, ExpressionBuiltinSingleArgBase from", "from .ConstantRefNodes import makeConstantRefNode from .DictionaryNodes import ExpressionKeyValuePair, ExpressionMakeDict from", "locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref ) def getLocalsScope(self): return", "you may not use this file except in compliance with", ".ConstantRefNodes import makeConstantRefNode from .DictionaryNodes import ExpressionKeyValuePair, ExpressionMakeDict from .ExpressionBases", "source_ref=self.source_ref ), source_ref=self.source_ref, ) ) # Locals is sorted of", "\"EXPRESSION_BUILTIN_LOCALS_REF\" def __init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref )", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "# Base classes can be abstract, pylint: disable=abstract-method __slots__ =", "variable_trace.mustHaveValue(): pairs.append( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable.getName(), user_provided=True, source_ref=self.source_ref, ), 
value=ExpressionVariableRef( variable=variable,", "(\"variable_traces\", \"locals_scope\") def __init__(self, locals_scope, source_ref): ExpressionBase.__init__(self, source_ref=source_ref) self.variable_traces =", "self, None, None def mayHaveSideEffects(self): return False def mayRaiseException(self, exception_type):", "\"Statically predicted locals dictionary.\" class ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase): kind = \"EXPRESSION_BUILTIN_DIR1\" def", "integrates with CPython, but also works on its own. #", "\"locals_scope\") def __init__(self, locals_scope, source_ref): ExpressionBase.__init__(self, source_ref=source_ref) self.variable_traces = None", "return sorted( pairs, key=lambda pair: names.index(pair.getKey().getCompileTimeConstant()), ) result = ExpressionMakeDict(pairs=_sorted(pairs),", "= ExpressionMakeDict( pairs=( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable_name, source_ref=self.source_ref ), value=ExpressionTempVariableRef( variable=variable,", "pair: names.index(pair.getKey().getCompileTimeConstant()), ) result = ExpressionMakeDict(pairs=_sorted(pairs), source_ref=self.source_ref) return result, \"new_expression\",", "# Just inform the collection that all escaped. self.variable_traces =", "\"__dir__\" being overloaded or not. # Any code could be", "own. # # Licensed under the Apache License, Version 2.0", "run, note that. trace_collection.onControlFlowEscape(self) # Any exception may be raised.", "CONDITIONS OF ANY KIND, either express or implied. # See", "ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref ) def getLocalsScope(self): return self.locals_scope def", "\"EXPRESSION_BUILTIN_DIR1\" def computeExpression(self, trace_collection): # TODO: Quite some cases should", "None, None pairs = [] for variable, variable_trace in self.variable_traces:", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "predict and this # should be using a slot, with", "= result.computeExpressionRaw(trace_collection) assert new_result[0] is result self.finalize() return result, \"new_expression\",", "ExpressionTempVariableRef, ExpressionVariableRef class ExpressionBuiltinGlobals(ExpressionBase): kind = \"EXPRESSION_BUILTIN_GLOBALS\" def __init__(self, source_ref):", "source_ref): ExpressionBase.__init__(self, source_ref=source_ref) self.variable_traces = None self.locals_scope = locals_scope def", "also works on its own. # # Licensed under the", "works on its own. # # Licensed under the Apache", "# TODO: Quite some cases should be possible to predict", "names = self.getParentVariableProvider().getLocalVariableNames() return sorted( pairs, key=lambda pair: names.index(pair.getKey().getCompileTimeConstant()), )", "them, so nothing can be trusted anymore, if we start", "self.locals_scope del self.variable_traces def mayHaveSideEffects(self): return False def mayRaiseException(self, exception_type):", "mayHaveSideEffects(self): return False def mayRaiseException(self, exception_type): return False def getVariableTraces(self):", "def __init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref ) def", "Base classes can be abstract, pylint: disable=abstract-method __slots__ = (\"variable_traces\",", "constant=variable_name, source_ref=self.source_ref ), value=ExpressionTempVariableRef( variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref, ) for", "or globals calls. 
\"\"\" from .ConstantRefNodes import makeConstantRefNode from .DictionaryNodes", "OR CONDITIONS OF ANY KIND, either express or implied. #", "goes. The \"dir()\" call without arguments is reformulated to locals", "the collection that all escaped. self.variable_traces = trace_collection.onLocalsUsage( self.getParentVariableProvider() )", "the License is distributed on an \"AS IS\" BASIS, #", "source_ref=source_ref) def finalize(self): del self.parent def computeExpressionRaw(self, trace_collection): return self,", "Globals/locals/single arg dir nodes These nodes give access to variables,", "using a slot, with \"__dir__\" being overloaded or not. #", "False def mayRaiseException(self, exception_type): return False class ExpressionBuiltinLocalsBase(ExpressionBase): # Base", "not know where their value goes. The \"dir()\" call without", "governing permissions and # limitations under the License. # \"\"\"", "and # limitations under the License. # \"\"\" Globals/locals/single arg", "classes can be abstract, pylint: disable=abstract-method __slots__ = (\"variable_traces\", \"locals_scope\")", "License. # \"\"\" Globals/locals/single arg dir nodes These nodes give", ") new_result = result.computeExpressionRaw(trace_collection) assert new_result[0] is result self.finalize() return", "code could be run, note that. trace_collection.onControlFlowEscape(self) # Any exception", "that is compatible and # integrates with CPython, but also", "and not variable_trace.mustNotHaveValue() ): return self, None, None # Other", "because using them, the code may change or access anything", "that all escaped. self.variable_traces = trace_collection.onLocalsUsage( self.getParentVariableProvider() ) for variable,", "law or agreed to in writing, software # distributed under", "import ExpressionBase, ExpressionBuiltinSingleArgBase from .VariableRefNodes import ExpressionTempVariableRef, ExpressionVariableRef class ExpressionBuiltinGlobals(ExpressionBase):", "return result, \"new_expression\", \"Propagated locals dictionary reference.\" # Just inform", "dictionary reference.\" # Just inform the collection that all escaped", "self.locals_scope.getPropagationVariables().items() ), source_ref=self.source_ref, ) new_result = result.computeExpressionRaw(trace_collection) assert new_result[0] is", "note that. trace_collection.onControlFlowEscape(self) # Any exception may be raised. trace_collection.onExceptionRaiseExit(BaseException)", ") def getLocalsScope(self): return self.locals_scope def computeExpressionRaw(self, trace_collection): if self.locals_scope.isMarkedForPropagation():", "an optimizing Python compiler that is compatible and # integrates", "permissions and # limitations under the License. # \"\"\" Globals/locals/single", "2020, <NAME>, mailto:<EMAIL> # # Part of \"Nuitka\", an optimizing", "mayRaiseException(self, exception_type): return False class ExpressionBuiltinLocalsBase(ExpressionBase): # Base classes can", "inform the collection that all escaped. self.variable_traces = trace_collection.onLocalsUsage( self.getParentVariableProvider()", "is reformulated to locals or globals calls. \"\"\" from .ConstantRefNodes", "know where their value goes. 
The \"dir()\" call without arguments", "variable, variable_trace in self.variable_traces: if variable_trace.mustHaveValue(): pairs.append( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable.getName(),", "may obtain a copy of the License at # #", "\"new_expression\", \"Statically predicted locals dictionary.\" class ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase): kind = \"EXPRESSION_BUILTIN_DIR1\"", "self.locals_scope = locals_scope def finalize(self): del self.locals_scope del self.variable_traces def", "computeExpression(self, trace_collection): # TODO: Quite some cases should be possible", ") for variable_name, variable in self.locals_scope.getPropagationVariables().items() ), source_ref=self.source_ref, ) new_result", "makeConstantRefNode from .DictionaryNodes import ExpressionKeyValuePair, ExpressionMakeDict from .ExpressionBases import ExpressionBase,", "return self, None, None class ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_COPY\" def", "def computeExpressionRaw(self, trace_collection): if self.locals_scope.isMarkedForPropagation(): result = ExpressionMakeDict( pairs=( ExpressionKeyValuePair(", "predicted locals dictionary.\" class ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase): kind = \"EXPRESSION_BUILTIN_DIR1\" def computeExpression(self,", "be possible to predict and this # should be using", "self, None, None class ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_COPY\" def computeExpressionRaw(self,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "escaped. self.variable_traces = trace_collection.onLocalsUsage( self.getParentVariableProvider() ) trace_collection.onLocalsDictEscaped(self.locals_scope) return self, None,", "be using a slot, with \"__dir__\" being overloaded or not.", "may not use this file except in compliance with the", "__init__(self, locals_scope, source_ref): ExpressionBase.__init__(self, source_ref=source_ref) self.variable_traces = None self.locals_scope =", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "code may change or access anything about them, so nothing", "this file except in compliance with the License. # You", "compiler that is compatible and # integrates with CPython, but", "slot, with \"__dir__\" being overloaded or not. # Any code", "elsewhere. if variable_trace.getNameUsageCount() > 1: return self, None, None pairs", "import ExpressionKeyValuePair, ExpressionMakeDict from .ExpressionBases import ExpressionBase, ExpressionBuiltinSingleArgBase from .VariableRefNodes", "for variable, variable_trace in self.variable_traces: if variable_trace.mustHaveValue(): pairs.append( ExpressionKeyValuePair( key=makeConstantRefNode(", "finalize(self): del self.locals_scope del self.variable_traces def mayHaveSideEffects(self): return False def", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable_name, source_ref=self.source_ref ), value=ExpressionTempVariableRef( variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref,", "# Any code could be run, note that. trace_collection.onControlFlowEscape(self) #", "overloaded or not. 
# Any code could be run, note", "None def mayHaveSideEffects(self): return False def mayRaiseException(self, exception_type): return False", "# # Licensed under the Apache License, Version 2.0 (the", "1: return self, None, None pairs = [] for variable,", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "self, None, None # Other locals elsewhere. if variable_trace.getNameUsageCount() >", "), source_ref=self.source_ref, ) ) # Locals is sorted of course.", "del self.variable_traces def mayHaveSideEffects(self): return False def mayRaiseException(self, exception_type): return", "Locals is sorted of course. def _sorted(pairs): names = self.getParentVariableProvider().getLocalVariableNames()", "or not. # Any code could be run, note that.", "\"EXPRESSION_BUILTIN_LOCALS_COPY\" def computeExpressionRaw(self, trace_collection): # Just inform the collection that", "def __init__(self, locals_scope, source_ref): ExpressionBase.__init__(self, source_ref=source_ref) self.variable_traces = None self.locals_scope", "locals_scope is not None def getLocalsScope(self): return self.locals_scope def computeExpressionRaw(self,", "give access to variables, highly problematic, because using them, the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "kind = \"EXPRESSION_BUILTIN_LOCALS_REF\" def __init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope,", "value goes. The \"dir()\" call without arguments is reformulated to", "access anything about them, so nothing can be trusted anymore,", "= (\"variable_traces\", \"locals_scope\") def __init__(self, locals_scope, source_ref): ExpressionBase.__init__(self, source_ref=source_ref) self.variable_traces", "def mayHaveSideEffects(self): return False def mayRaiseException(self, exception_type): return False def", "key=lambda pair: names.index(pair.getKey().getCompileTimeConstant()), ) result = ExpressionMakeDict(pairs=_sorted(pairs), source_ref=self.source_ref) return result,", "if self.locals_scope.isMarkedForPropagation(): result = ExpressionMakeDict( pairs=( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable_name, source_ref=self.source_ref", "dictionary.\" class ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase): kind = \"EXPRESSION_BUILTIN_DIR1\" def computeExpression(self, trace_collection): #", "being overloaded or not. # Any code could be run,", "if variable_trace.getNameUsageCount() > 1: return self, None, None pairs =", "= self.getParentVariableProvider().getLocalVariableNames() return sorted( pairs, key=lambda pair: names.index(pair.getKey().getCompileTimeConstant()), ) result", "trace_collection.onControlFlowEscape(self) # Any exception may be raised. trace_collection.onExceptionRaiseExit(BaseException) return self,", ") # Locals is sorted of course. def _sorted(pairs): names", "__init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref ) assert locals_scope", "source_ref): ExpressionBase.__init__(self, source_ref=source_ref) def finalize(self): del self.parent def computeExpressionRaw(self, trace_collection):", "def getLocalsScope(self): return self.locals_scope def computeExpressionRaw(self, trace_collection): if self.locals_scope.isMarkedForPropagation(): result", "compatible and # integrates with CPython, but also works on", "nothing can be trusted anymore, if we start to not", "all escaped unless it is abortative. 
if not self.getParent().isStatementReturn(): trace_collection.onLocalsUsage(self.getParentVariableProvider())", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "abortative. if not self.getParent().isStatementReturn(): trace_collection.onLocalsUsage(self.getParentVariableProvider()) return self, None, None class", "source_ref=self.source_ref, ) for variable_name, variable in self.locals_scope.getPropagationVariables().items() ), source_ref=self.source_ref, )", "trace_collection): if self.locals_scope.isMarkedForPropagation(): result = ExpressionMakeDict( pairs=( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable_name,", "ExpressionMakeDict(pairs=_sorted(pairs), source_ref=self.source_ref) return result, \"new_expression\", \"Statically predicted locals dictionary.\" class", "or implied. # See the License for the specific language", "collection that all escaped. self.variable_traces = trace_collection.onLocalsUsage( self.getParentVariableProvider() ) for", "self.variable_traces = None self.locals_scope = locals_scope def finalize(self): del self.locals_scope", "ExpressionBuiltinLocalsBase(ExpressionBase): # Base classes can be abstract, pylint: disable=abstract-method __slots__", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "# Any exception may be raised. trace_collection.onExceptionRaiseExit(BaseException) return self, None,", "value=ExpressionTempVariableRef( variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref, ) for variable_name, variable in", "change or access anything about them, so nothing can be", "variable_trace.mustHaveValue() and not variable_trace.mustNotHaveValue() ): return self, None, None #", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "= \"EXPRESSION_BUILTIN_DIR1\" def computeExpression(self, trace_collection): # TODO: Quite some cases", "new_result[0] is result self.finalize() return result, \"new_expression\", \"Propagated locals dictionary", "def getLocalsScope(self): return self.locals_scope def computeExpressionRaw(self, trace_collection): # Just inform", "), source_ref=self.source_ref, ) new_result = result.computeExpressionRaw(trace_collection) assert new_result[0] is result", "not self.getParent().isStatementReturn(): trace_collection.onLocalsUsage(self.getParentVariableProvider()) return self, None, None class ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase): kind", "pairs=( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable_name, source_ref=self.source_ref ), value=ExpressionTempVariableRef( variable=variable, source_ref=self.source_ref ),", "course. def _sorted(pairs): names = self.getParentVariableProvider().getLocalVariableNames() return sorted( pairs, key=lambda", "(the \"License\"); # you may not use this file except", "self.getParentVariableProvider().getLocalVariableNames() return sorted( pairs, key=lambda pair: names.index(pair.getKey().getCompileTimeConstant()), ) result =", "# you may not use this file except in compliance", "locals_scope=locals_scope, source_ref=source_ref ) assert locals_scope is not None def getLocalsScope(self):", "result, \"new_expression\", \"Propagated locals dictionary reference.\" # Just inform the", "\"Propagated locals dictionary reference.\" # Just inform the collection that", "Just inform the collection that all escaped unless it is", "to locals or globals calls. 
\"\"\" from .ConstantRefNodes import makeConstantRefNode", "getVariableTraces(self): return self.variable_traces class ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_UPDATED\" def __init__(self,", "ExpressionVariableRef class ExpressionBuiltinGlobals(ExpressionBase): kind = \"EXPRESSION_BUILTIN_GLOBALS\" def __init__(self, source_ref): ExpressionBase.__init__(self,", "None def getLocalsScope(self): return self.locals_scope def computeExpressionRaw(self, trace_collection): # Just", "reference.\" # Just inform the collection that all escaped unless", "# # Unless required by applicable law or agreed to", "ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref ) assert locals_scope is not None", "# should be using a slot, with \"__dir__\" being overloaded", "return self, None, None def mayHaveSideEffects(self): return False def mayRaiseException(self,", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "return self.variable_traces class ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_UPDATED\" def __init__(self, locals_scope,", "<NAME>, mailto:<EMAIL> # # Part of \"Nuitka\", an optimizing Python", "Version 2.0 (the \"License\"); # you may not use this", ") trace_collection.onLocalsDictEscaped(self.locals_scope) return self, None, None class ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase): kind =", "ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase): kind = \"EXPRESSION_BUILTIN_DIR1\" def computeExpression(self, trace_collection): # TODO: Quite", "= [] for variable, variable_trace in self.variable_traces: if variable_trace.mustHaveValue(): pairs.append(", "mayHaveSideEffects(self): return False def mayRaiseException(self, exception_type): return False class ExpressionBuiltinLocalsBase(ExpressionBase):", "trace_collection): # Just inform the collection that all escaped. self.variable_traces", "key=makeConstantRefNode( constant=variable.getName(), user_provided=True, source_ref=self.source_ref, ), value=ExpressionVariableRef( variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref,", "not. # Any code could be run, note that. trace_collection.onControlFlowEscape(self)", "computeExpressionRaw(self, trace_collection): # Just inform the collection that all escaped.", "class ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase): kind = \"EXPRESSION_BUILTIN_DIR1\" def computeExpression(self, trace_collection): # TODO:", "implied. # See the License for the specific language governing", "locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref ) assert locals_scope is", "escaped. self.variable_traces = trace_collection.onLocalsUsage( self.getParentVariableProvider() ) for variable, variable_trace in", "locals dictionary.\" class ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase): kind = \"EXPRESSION_BUILTIN_DIR1\" def computeExpression(self, trace_collection):", "under the Apache License, Version 2.0 (the \"License\"); # you", "be abstract, pylint: disable=abstract-method __slots__ = (\"variable_traces\", \"locals_scope\") def __init__(self,", "ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_REF\" def __init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self,", "unless it is abortative. 
if not self.getParent().isStatementReturn(): trace_collection.onLocalsUsage(self.getParentVariableProvider()) return self,", "None, None def mayHaveSideEffects(self): return False def mayRaiseException(self, exception_type): return", "collection that all escaped. self.variable_traces = trace_collection.onLocalsUsage( self.getParentVariableProvider() ) trace_collection.onLocalsDictEscaped(self.locals_scope)", "False class ExpressionBuiltinLocalsBase(ExpressionBase): # Base classes can be abstract, pylint:", "by applicable law or agreed to in writing, software #", "ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_UPDATED\" def __init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self,", "could be run, note that. trace_collection.onControlFlowEscape(self) # Any exception may", "variables, highly problematic, because using them, the code may change", "return False def mayRaiseException(self, exception_type): return False def getVariableTraces(self): return", "self.locals_scope.isMarkedForPropagation(): result = ExpressionMakeDict( pairs=( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable_name, source_ref=self.source_ref ),", "be trusted anymore, if we start to not know where", "to not know where their value goes. The \"dir()\" call", "= locals_scope def finalize(self): del self.locals_scope del self.variable_traces def mayHaveSideEffects(self):", "# Just inform the collection that all escaped unless it", "def mayRaiseException(self, exception_type): return False def getVariableTraces(self): return self.variable_traces class", "ExpressionBase.__init__(self, source_ref=source_ref) self.variable_traces = None self.locals_scope = locals_scope def finalize(self):", "def finalize(self): del self.locals_scope del self.variable_traces def mayHaveSideEffects(self): return False", "None pairs = [] for variable, variable_trace in self.variable_traces: if", "to predict and this # should be using a slot,", "the License. # \"\"\" Globals/locals/single arg dir nodes These nodes", "result, \"new_expression\", \"Statically predicted locals dictionary.\" class ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase): kind =", "be run, note that. trace_collection.onControlFlowEscape(self) # Any exception may be", "with CPython, but also works on its own. # #", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "return self.locals_scope def computeExpressionRaw(self, trace_collection): # Just inform the collection", "the code may change or access anything about them, so", "that. trace_collection.onControlFlowEscape(self) # Any exception may be raised. trace_collection.onExceptionRaiseExit(BaseException) return", "Unless required by applicable law or agreed to in writing,", "None, None class ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_REF\" def __init__(self, locals_scope,", "in self.variable_traces: if variable_trace.mustHaveValue(): pairs.append( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable.getName(), user_provided=True, source_ref=self.source_ref,", "the specific language governing permissions and # limitations under the", "of \"Nuitka\", an optimizing Python compiler that is compatible and", "inform the collection that all escaped unless it is abortative.", "reformulated to locals or globals calls. 
\"\"\" from .ConstantRefNodes import", "variable in self.locals_scope.getPropagationVariables().items() ), source_ref=self.source_ref, ) new_result = result.computeExpressionRaw(trace_collection) assert", "Part of \"Nuitka\", an optimizing Python compiler that is compatible", "not variable_trace.mustNotHaveValue() ): return self, None, None # Other locals", "applicable law or agreed to in writing, software # distributed", "return False def getVariableTraces(self): return self.variable_traces class ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase): kind =", "del self.locals_scope del self.variable_traces def mayHaveSideEffects(self): return False def mayRaiseException(self,", "locals dictionary reference.\" # Just inform the collection that all", "pylint: disable=abstract-method __slots__ = (\"variable_traces\", \"locals_scope\") def __init__(self, locals_scope, source_ref):", "None self.locals_scope = locals_scope def finalize(self): del self.locals_scope del self.variable_traces", "in writing, software # distributed under the License is distributed", "but also works on its own. # # Licensed under", "self.getParent().isStatementReturn(): trace_collection.onLocalsUsage(self.getParentVariableProvider()) return self, None, None class ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase): kind =", "variable_name, variable in self.locals_scope.getPropagationVariables().items() ), source_ref=self.source_ref, ) new_result = result.computeExpressionRaw(trace_collection)", "calls. \"\"\" from .ConstantRefNodes import makeConstantRefNode from .DictionaryNodes import ExpressionKeyValuePair,", "collection that all escaped unless it is abortative. if not", "return result, \"new_expression\", \"Statically predicted locals dictionary.\" class ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase): kind", "source_ref=self.source_ref) return result, \"new_expression\", \"Statically predicted locals dictionary.\" class ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase):", "computeExpressionRaw(self, trace_collection): return self, None, None def mayHaveSideEffects(self): return False", "source_ref=self.source_ref, ), value=ExpressionVariableRef( variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref, ) ) #", "pairs.append( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable.getName(), user_provided=True, source_ref=self.source_ref, ), value=ExpressionVariableRef( variable=variable, source_ref=self.source_ref", "a slot, with \"__dir__\" being overloaded or not. # Any", "highly problematic, because using them, the code may change or", "), value=ExpressionTempVariableRef( variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref, ) for variable_name, variable", "some cases should be possible to predict and this #", "None, None # Other locals elsewhere. if variable_trace.getNameUsageCount() > 1:", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "computeExpressionRaw(self, trace_collection): if self.locals_scope.isMarkedForPropagation(): result = ExpressionMakeDict( pairs=( ExpressionKeyValuePair( key=makeConstantRefNode(", "# Locals is sorted of course. def _sorted(pairs): names =", ".VariableRefNodes import ExpressionTempVariableRef, ExpressionVariableRef class ExpressionBuiltinGlobals(ExpressionBase): kind = \"EXPRESSION_BUILTIN_GLOBALS\" def", "License, Version 2.0 (the \"License\"); # you may not use", "its own. 
# # Licensed under the Apache License, Version", "# You may obtain a copy of the License at", "access to variables, highly problematic, because using them, the code", "\"new_expression\", \"Propagated locals dictionary reference.\" # Just inform the collection", "class ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_COPY\" def computeExpressionRaw(self, trace_collection): # Just", ") assert locals_scope is not None def getLocalsScope(self): return self.locals_scope", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "trace_collection.onLocalsUsage( self.getParentVariableProvider() ) for variable, variable_trace in self.variable_traces: if (", "= \"EXPRESSION_BUILTIN_LOCALS_COPY\" def computeExpressionRaw(self, trace_collection): # Just inform the collection", "of course. def _sorted(pairs): names = self.getParentVariableProvider().getLocalVariableNames() return sorted( pairs,", "self.parent def computeExpressionRaw(self, trace_collection): return self, None, None def mayHaveSideEffects(self):", "= trace_collection.onLocalsUsage( self.getParentVariableProvider() ) for variable, variable_trace in self.variable_traces: if", "def mayRaiseException(self, exception_type): return False class ExpressionBuiltinLocalsBase(ExpressionBase): # Base classes", "self, None, None class ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_REF\" def __init__(self,", "for variable_name, variable in self.locals_scope.getPropagationVariables().items() ), source_ref=self.source_ref, ) new_result =", "or access anything about them, so nothing can be trusted", "def getVariableTraces(self): return self.variable_traces class ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_UPDATED\" def", "all escaped. self.variable_traces = trace_collection.onLocalsUsage( self.getParentVariableProvider() ) for variable, variable_trace", "the License for the specific language governing permissions and #", "it is abortative. if not self.getParent().isStatementReturn(): trace_collection.onLocalsUsage(self.getParentVariableProvider()) return self, None,", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. # See the License for the", "is sorted of course. 
def _sorted(pairs): names = self.getParentVariableProvider().getLocalVariableNames() return", "result.computeExpressionRaw(trace_collection) assert new_result[0] is result self.finalize() return result, \"new_expression\", \"Propagated", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "self.variable_traces class ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_UPDATED\" def __init__(self, locals_scope, source_ref):", "trace_collection.onLocalsUsage(self.getParentVariableProvider()) return self, None, None class ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_COPY\"", "def finalize(self): del self.parent def computeExpressionRaw(self, trace_collection): return self, None,", "_sorted(pairs): names = self.getParentVariableProvider().getLocalVariableNames() return sorted( pairs, key=lambda pair: names.index(pair.getKey().getCompileTimeConstant()),", "possible to predict and this # should be using a", "not variable_trace.mustHaveValue() and not variable_trace.mustNotHaveValue() ): return self, None, None", "ExpressionMakeDict( pairs=( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable_name, source_ref=self.source_ref ), value=ExpressionTempVariableRef( variable=variable, source_ref=self.source_ref", "that all escaped. self.variable_traces = trace_collection.onLocalsUsage( self.getParentVariableProvider() ) trace_collection.onLocalsDictEscaped(self.locals_scope) return", "source_ref=self.source_ref, ) ) # Locals is sorted of course. def", "getLocalsScope(self): return self.locals_scope def computeExpressionRaw(self, trace_collection): if self.locals_scope.isMarkedForPropagation(): result =", "del self.parent def computeExpressionRaw(self, trace_collection): return self, None, None def", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", ".ExpressionBases import ExpressionBase, ExpressionBuiltinSingleArgBase from .VariableRefNodes import ExpressionTempVariableRef, ExpressionVariableRef class", "result = ExpressionMakeDict( pairs=( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable_name, source_ref=self.source_ref ), value=ExpressionTempVariableRef(", "ExpressionBase.__init__(self, source_ref=source_ref) def finalize(self): del self.parent def computeExpressionRaw(self, trace_collection): return", "is result self.finalize() return result, \"new_expression\", \"Propagated locals dictionary reference.\"", "def computeExpression(self, trace_collection): # TODO: Quite some cases should be", "def __init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref ) assert", "ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_COPY\" def computeExpressionRaw(self, trace_collection): # Just inform", "None class ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase): kind = \"EXPRESSION_BUILTIN_LOCALS_REF\" def __init__(self, locals_scope, source_ref):", "user_provided=True, source_ref=self.source_ref, ), value=ExpressionVariableRef( variable=variable, source_ref=self.source_ref ), source_ref=self.source_ref, ) )", "Python compiler that is compatible and # integrates with CPython,", "ExpressionKeyValuePair, ExpressionMakeDict from .ExpressionBases import ExpressionBase, ExpressionBuiltinSingleArgBase from .VariableRefNodes import", "import ExpressionTempVariableRef, ExpressionVariableRef class 
ExpressionBuiltinGlobals(ExpressionBase): kind = \"EXPRESSION_BUILTIN_GLOBALS\" def __init__(self,", "source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref ) assert locals_scope is not", "ExpressionMakeDict from .ExpressionBases import ExpressionBase, ExpressionBuiltinSingleArgBase from .VariableRefNodes import ExpressionTempVariableRef,", "can be trusted anymore, if we start to not know", "def computeExpressionRaw(self, trace_collection): return self, None, None def mayHaveSideEffects(self): return", "\"License\"); # you may not use this file except in", "= None self.locals_scope = locals_scope def finalize(self): del self.locals_scope del", "not None def getLocalsScope(self): return self.locals_scope def computeExpressionRaw(self, trace_collection): #", "nodes give access to variables, highly problematic, because using them,", "may change or access anything about them, so nothing can", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "dir nodes These nodes give access to variables, highly problematic,", "return False class ExpressionBuiltinLocalsBase(ExpressionBase): # Base classes can be abstract,", "self.variable_traces: if ( not variable_trace.mustHaveValue() and not variable_trace.mustNotHaveValue() ): return", "with \"__dir__\" being overloaded or not. # Any code could", "# distributed under the License is distributed on an \"AS", "= trace_collection.onLocalsUsage( self.getParentVariableProvider() ) trace_collection.onLocalsDictEscaped(self.locals_scope) return self, None, None class", "if variable_trace.mustHaveValue(): pairs.append( ExpressionKeyValuePair( key=makeConstantRefNode( constant=variable.getName(), user_provided=True, source_ref=self.source_ref, ), value=ExpressionVariableRef(", "# Unless required by applicable law or agreed to in", "= \"EXPRESSION_BUILTIN_LOCALS_REF\" def __init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref", "return self, None, None # Other locals elsewhere. 
if variable_trace.getNameUsageCount()", "def __init__(self, source_ref): ExpressionBase.__init__(self, source_ref=source_ref) def finalize(self): del self.parent def", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "trusted anymore, if we start to not know where their", "= \"EXPRESSION_BUILTIN_LOCALS_UPDATED\" def __init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref", "from .ExpressionBases import ExpressionBase, ExpressionBuiltinSingleArgBase from .VariableRefNodes import ExpressionTempVariableRef, ExpressionVariableRef", "source_ref=source_ref) self.variable_traces = None self.locals_scope = locals_scope def finalize(self): del", "if ( not variable_trace.mustHaveValue() and not variable_trace.mustNotHaveValue() ): return self,", "You may obtain a copy of the License at #", "so nothing can be trusted anymore, if we start to", "for variable, variable_trace in self.variable_traces: if ( not variable_trace.mustHaveValue() and", "self.getParentVariableProvider() ) trace_collection.onLocalsDictEscaped(self.locals_scope) return self, None, None class ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase): kind", "# Part of \"Nuitka\", an optimizing Python compiler that is", "trace_collection.onLocalsUsage( self.getParentVariableProvider() ) trace_collection.onLocalsDictEscaped(self.locals_scope) return self, None, None class ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase):", "the Apache License, Version 2.0 (the \"License\"); # you may", "\"EXPRESSION_BUILTIN_LOCALS_UPDATED\" def __init__(self, locals_scope, source_ref): ExpressionBuiltinLocalsBase.__init__( self, locals_scope=locals_scope, source_ref=source_ref )" ]
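# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Nuitka itself): the module docstring above
# says a no-argument dir() is reformulated into locals or globals calls.  The
# CPython semantics that reformulation relies on are simply that dir() with no
# arguments is the sorted list of names in the current scope.  A minimal,
# hypothetical demonstration under plain CPython:

def _dir_reformulation_demo():
    x = 1  # noqa: F841 -- only assigned so a local name shows up in dir()
    assert dir() == sorted(locals().keys())


_dir_reformulation_demo()
# At module level the "current local scope" is the module namespace itself.
assert dir() == sorted(globals().keys())
# ---------------------------------------------------------------------------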
import math

import pytest

import chainerx


def _check_cast_scalar_equals_data(scalar, data):
    assert bool(scalar) == bool(data)
    assert int(scalar) == int(data)
    assert float(scalar) == float(data)


all_scalar_values = [
    -2, 1, -1.5, 2.3, True, False, float('inf'), float('nan')]


@pytest.mark.parametrize('value,dtype', [
    (0, chainerx.int64),
    (-1, chainerx.int64),
    (0x7fffffffffffffff, chainerx.int64),
    (-0x8000000000000000, chainerx.int64),
    (0.0, chainerx.float64),
    (float('inf'), chainerx.float64),
    (float('nan'), chainerx.float64),
    (True, chainerx.bool_),
    (False, chainerx.bool_),
])
def test_init_without_dtype(value, dtype):
    scalar = chainerx.Scalar(value)
    assert scalar.dtype == dtype
    if math.isnan(value):
        assert math.isnan(scalar.tolist())
    else:
        assert scalar.tolist() == value
    assert isinstance(scalar.tolist(), type(value))


@pytest.mark.parametrize('value,cast_dtype,expected_value', [
    (0, chainerx.bool_, False),
    (0, chainerx.int8, 0),
    (0, chainerx.int16, 0),
    (0, chainerx.int32, 0),
    (0, chainerx.int64, 0),
    (0, chainerx.uint8, 0),
    (0, chainerx.float32, 0.0),
    (0, chainerx.float64, 0.0),
    (0.0, chainerx.bool_, False),
    (0.0, chainerx.int8, 0),
    (0.0, chainerx.int16, 0),
    (0.0, chainerx.int32, 0),
    (0.0, chainerx.int64, 0),
    (0.0, chainerx.uint8, 0),
    (0.0, chainerx.float32, 0.0),
    (0.0, chainerx.float64, 0.0),
    (1, chainerx.bool_, True),
    (1, chainerx.int8, 1),
    (1, chainerx.int16, 1),
    (1, chainerx.int32, 1),
    (1, chainerx.int64, 1),
    (1, chainerx.uint8, 1),
    (1, chainerx.float32, 1.0),
    (1, chainerx.float64, 1.0),
    (1.0, chainerx.bool_, True),
    (1.0, chainerx.int8, 1),
    (1.0, chainerx.int16, 1),
    (1.0, chainerx.int32, 1),
    (1.0, chainerx.int64, 1),
    (1.0, chainerx.uint8, 1),
    (1.0, chainerx.float32, 1.0),
    (1.0, chainerx.float64, 1.0),
    (-1, chainerx.bool_, True),
    (-1, chainerx.int8, -1),
    (-1, chainerx.int16, -1),
    (-1, chainerx.int32, -1),
    (-1, chainerx.int64, -1),
    (-1, chainerx.uint8, 0xff),
    (-1, chainerx.float32, -1.0),
    (-1, chainerx.float64, -1.0),
    (0x100, chainerx.bool_, True),
    (0x100, chainerx.int8, 0),
    (0x100, chainerx.int16, 0x100),
    (0x100, chainerx.int32, 0x100),
    (0x100, chainerx.int64, 0x100),
    (0x100, chainerx.uint8, 0),
    (0x10000, chainerx.bool_, True),
    (0x10000, chainerx.int8, 0),
    (0x10000, chainerx.int16, 0),
    (0x10000, chainerx.int32, 0x10000),
    (0x10000, chainerx.int64, 0x10000),
    (0x10000, chainerx.uint8, 0),
    (0x100000000, chainerx.bool_, True),
    (0x100000000, chainerx.int8, 0),
    (0x100000000, chainerx.int16, 0),
    (0x100000000, chainerx.int32, 0),
    (0x100000000, chainerx.int64, 0x100000000),
    (0x100000000, chainerx.uint8, 0),
    (0x7fffffffffffffff, chainerx.bool_, True),
    (0x7fffffffffffffff, chainerx.int8, -1),
    (0x7fffffffffffffff, chainerx.int16, -1),
    (0x7fffffffffffffff, chainerx.int32, -1),
    (0x7fffffffffffffff, chainerx.int64, 0x7fffffffffffffff),
    (0x7fffffffffffffff, chainerx.uint8, 255),
])
def test_init_casted(value, cast_dtype, expected_value):
    scalar = chainerx.Scalar(value, cast_dtype)
    assert scalar.dtype == cast_dtype
    if math.isnan(expected_value):
        assert math.isnan(scalar.tolist())
    else:
        assert scalar.tolist() == expected_value
    assert isinstance(scalar.tolist(), type(expected_value))


@pytest.mark.parametrize(
    'value', [0, 0.0, 1, 1.0, -1, 0x100, 0x10000, 0x100000000, 0x7fffffffffffffff])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_init_with_dtype(value, dtype_spec):
    expected_dtype = chainerx.dtype(dtype_spec)
    scalar = chainerx.Scalar(value, dtype_spec)
    assert scalar.dtype == expected_dtype
    assert scalar == chainerx.Scalar(value, expected_dtype)


@pytest.mark.parametrize('value1,value2', [
    # TODO(niboshi): Support commented-out cases
    (0, 0),
    (1, 1),
    # (1, 1.0),
    (1.5, 1.5),
    (-1.5, -1.5),
    (True, True),
    (False, False),
    # (True, 1),
    # (True, 1.0),
    # (False, 0),
    # (False, 0.0),
    # (float('inf'), float('inf')),
])
def test_equality(value1, value2):
    scalar1 = chainerx.Scalar(value1)
    scalar2 = chainerx.Scalar(value2)

    assert scalar1 == scalar2
    assert scalar2 == scalar1
    assert scalar1 == value1
    assert value1 == scalar1
    assert scalar2 == value2
    assert value2 == scalar2
    assert scalar2 == value1
    assert value1 == scalar2
    assert scalar1 == value2
    assert value2 == scalar1


@pytest.mark.parametrize('value1,value2', [
    (0, 1),
    (-1, 1),
    (-1.0001, -1.0),
    (-1.0001, -1),
    (True, False),
    (True, 1.1),
    (1.0001, 1.0002),
    (float('nan'), float('nan')),
])
def test_inequality(value1, value2):
    scalar1 = chainerx.Scalar(value1)
    scalar2 = chainerx.Scalar(value2)

    assert scalar1 != scalar2
    assert scalar2 != scalar1
    assert scalar2 != value1
    assert value1 != scalar2
    assert scalar1 != value2
    assert value2 != scalar1


@pytest.mark.parametrize('value', [
    -2, 1, -1.5, 2.3, True, False
])
def test_cast(value):
    scalar = chainerx.Scalar(value)

    _check_cast_scalar_equals_data(scalar, value)
    _check_cast_scalar_equals_data(+scalar, +value)
    if isinstance(value, bool):
        with pytest.raises(chainerx.DtypeError):
            -scalar  # should not be able to negate bool
    else:
        _check_cast_scalar_equals_data(-scalar, -value)


@pytest.mark.parametrize('value', all_scalar_values)
def test_dtype(value):
    scalar = chainerx.Scalar(value)
    if isinstance(value, bool):
        assert scalar.dtype == chainerx.bool_
    elif isinstance(value, int):
        assert scalar.dtype == chainerx.int64
    elif isinstance(value, float):
        assert scalar.dtype == chainerx.float64
    else:
        assert False


@pytest.mark.parametrize('value', all_scalar_values)
def test_repr(value):
    scalar = chainerx.Scalar(value)

    assert repr(scalar) == repr(value)
    assert str(scalar) == str(value)


def test_init_invalid():
    with pytest.raises(TypeError):
        chainerx.Scalar("1")  # string, which is not a numeric
(-1, chainerx.bool_, True), (-1, chainerx.int8, -1), (-1, chainerx.int16, -1),", "<reponame>yuhonghong66/chainer import math import pytest import chainerx def _check_cast_scalar_equals_data(scalar, data):", "chainerx.bool_), (False, chainerx.bool_), ]) def test_init_without_dtype(value, dtype): scalar = chainerx.Scalar(value)", "chainerx.Scalar(value2) assert scalar1 != scalar2 assert scalar2 != scalar1 assert", "chainerx.Scalar(value1) scalar2 = chainerx.Scalar(value2) assert scalar1 == scalar2 assert scalar2", "value1 assert value1 == scalar1 assert scalar2 == value2 assert", "import chainerx def _check_cast_scalar_equals_data(scalar, data): assert bool(scalar) == bool(data) assert", "(float('nan'), float('nan')), ]) def test_inequality(value1, value2): scalar1 = chainerx.Scalar(value1) scalar2", "isinstance(value, bool): assert scalar.dtype == chainerx.bool_ elif isinstance(value, int): assert", "(float('nan'), chainerx.float64), (True, chainerx.bool_), (False, chainerx.bool_), ]) def test_init_without_dtype(value, dtype):", "def test_init_without_dtype(value, dtype): scalar = chainerx.Scalar(value) assert scalar.dtype == dtype", "!= value1 assert value1 != scalar2 assert scalar1 != value2", "[ (0, 1), (-1, 1), (-1.0001, -1.0), (-1.0001, -1), (True,", "(float('inf'), chainerx.float64), (float('nan'), chainerx.float64), (True, chainerx.bool_), (False, chainerx.bool_), ]) def", "0), (0x100000000, chainerx.bool_, True), (0x100000000, chainerx.int8, 0), (0x100000000, chainerx.int16, 0),", "cast_dtype) assert scalar.dtype == cast_dtype if math.isnan(expected_value): assert math.isnan(scalar.tolist()) else:", "value2 != scalar1 @pytest.mark.parametrize('value', [ -2, 1, -1.5, 2.3, True,", "== float(data) all_scalar_values = [ -2, 1, -1.5, 2.3, True,", "(-1, chainerx.float64, -1.0), (0x100, chainerx.bool_, True), (0x100, chainerx.int8, 0), (0x100,", "value) _check_cast_scalar_equals_data(+scalar, +value) if isinstance(value, bool): with pytest.raises(chainerx.DtypeError): -scalar #", "-1.0), (0x100, chainerx.bool_, True), (0x100, chainerx.int8, 0), (0x100, chainerx.int16, 0x100),", "chainerx.int32, -1), (0x7fffffffffffffff, chainerx.int64, 0x7fffffffffffffff), (0x7fffffffffffffff, chainerx.uint8, 255), ]) def", "1, -1.5, 2.3, True, False ]) def test_cast(value): scalar =", "1), (1.0, chainerx.int16, 1), (1.0, chainerx.int32, 1), (1.0, chainerx.int64, 1),", "chainerx.int64, 0x100000000), (0x100000000, chainerx.uint8, 0), (0x7fffffffffffffff, chainerx.bool_, True), (0x7fffffffffffffff, chainerx.int8,", "1.0), (1, chainerx.float64, 1.0), (1.0, chainerx.bool_, True), (1.0, chainerx.int8, 1),", "[ (0, chainerx.int64), (-1, chainerx.int64), (0x7fffffffffffffff, chainerx.int64), (-0x8000000000000000, chainerx.int64), (0.0,", "0), (0x100000000, chainerx.int32, 0), (0x100000000, chainerx.int64, 0x100000000), (0x100000000, chainerx.uint8, 0),", "0x100), (0x100, chainerx.int64, 0x100), (0x100, chainerx.uint8, 0), (0x10000, chainerx.bool_, True),", "[ -2, 1, -1.5, 2.3, True, False ]) def test_cast(value):", "= chainerx.Scalar(value) if isinstance(value, bool): assert scalar.dtype == chainerx.bool_ elif", "chainerx.bool_, True), (0x10000, chainerx.int8, 0), (0x10000, chainerx.int16, 0), (0x10000, chainerx.int32,", "(True, chainerx.bool_), (False, chainerx.bool_), ]) def test_init_without_dtype(value, dtype): scalar =", "chainerx.int8, 0), (0.0, chainerx.int16, 0), (0.0, chainerx.int32, 0), (0.0, chainerx.int64,", "chainerx.int32, 1), (1.0, chainerx.int64, 1), (1.0, chainerx.uint8, 1), (1.0, 
chainerx.float32,", "test_init_casted(value, cast_dtype, expected_value): scalar = chainerx.Scalar(value, cast_dtype) assert scalar.dtype ==", "0xff), (-1, chainerx.float32, -1.0), (-1, chainerx.float64, -1.0), (0x100, chainerx.bool_, True),", "(0, 0), (1, 1), # (1, 1.0), (1.5, 1.5), (-1.5,", "(-1, chainerx.int8, -1), (-1, chainerx.int16, -1), (-1, chainerx.int32, -1), (-1,", "scalar2 != scalar1 assert scalar2 != value1 assert value1 !=", "(0x7fffffffffffffff, chainerx.int64), (-0x8000000000000000, chainerx.int64), (0.0, chainerx.float64), (float('inf'), chainerx.float64), (float('nan'), chainerx.float64),", "assert int(scalar) == int(data) assert float(scalar) == float(data) all_scalar_values =", "chainerx.bool_, True), (0x100000000, chainerx.int8, 0), (0x100000000, chainerx.int16, 0), (0x100000000, chainerx.int32,", "= chainerx.Scalar(value) assert repr(scalar) == repr(value) assert str(scalar) == str(value)", "assert value1 != scalar2 assert scalar1 != value2 assert value2", "False), (True, 1.1), (1.0001, 1.0002), (float('nan'), float('nan')), ]) def test_inequality(value1,", "(0, chainerx.int16, 0), (0, chainerx.int32, 0), (0, chainerx.int64, 0), (0,", "chainerx.float64 else: assert False @pytest.mark.parametrize('value', all_scalar_values) def test_repr(value): scalar =", "be able to negate bool else: _check_cast_scalar_equals_data(-scalar, -value) @pytest.mark.parametrize('value', all_scalar_values)", "_check_cast_scalar_equals_data(scalar, data): assert bool(scalar) == bool(data) assert int(scalar) == int(data)", "value2 == scalar2 assert scalar2 == value1 assert value1 ==", "-1), (-1, chainerx.int32, -1), (-1, chainerx.int64, -1), (-1, chainerx.uint8, 0xff),", "0), # (False, 0.0), # (float('inf'), float('inf')), ]) def test_equality(value1,", "== chainerx.Scalar(value, expected_dtype) @pytest.mark.parametrize('value1,value2', [ # TODO(niboshi): Support commented-out cases", "value1 == scalar2 assert scalar1 == value2 assert value2 ==", "= chainerx.Scalar(value) assert scalar.dtype == dtype if math.isnan(value): assert math.isnan(scalar.tolist())", "-2, 1, -1.5, 2.3, True, False ]) def test_cast(value): scalar", "type(value)) @pytest.mark.parametrize('value,cast_dtype,expected_value', [ (0, chainerx.bool_, False), (0, chainerx.int8, 0), (0,", "chainerx.bool_ elif isinstance(value, int): assert scalar.dtype == chainerx.int64 elif isinstance(value,", "isinstance(value, float): assert scalar.dtype == chainerx.float64 else: assert False @pytest.mark.parametrize('value',", "chainerx.int32, -1), (-1, chainerx.int64, -1), (-1, chainerx.uint8, 0xff), (-1, chainerx.float32,", "chainerx.bool_, True), (0x100, chainerx.int8, 0), (0x100, chainerx.int16, 0x100), (0x100, chainerx.int32,", "chainerx def _check_cast_scalar_equals_data(scalar, data): assert bool(scalar) == bool(data) assert int(scalar)", "repr(scalar) == repr(value) assert str(scalar) == str(value) def test_init_invalid(): with", "assert bool(scalar) == bool(data) assert int(scalar) == int(data) assert float(scalar)", "== chainerx.int64 elif isinstance(value, float): assert scalar.dtype == chainerx.float64 else:", "chainerx.float32, 0.0), (0.0, chainerx.float64, 0.0), (1, chainerx.bool_, True), (1, chainerx.int8,", "-2, 1, -1.5, 2.3, True, False, float('inf'), float('nan')] @pytest.mark.parametrize('value,dtype', [", "chainerx.int16, -1), (-1, chainerx.int32, -1), (-1, chainerx.int64, -1), (-1, chainerx.uint8,", "commented-out cases (0, 0), (1, 1), # (1, 1.0), (1.5,", "elif isinstance(value, float): assert scalar.dtype == 
chainerx.float64 else: assert False", "float('inf'), float('nan')] @pytest.mark.parametrize('value,dtype', [ (0, chainerx.int64), (-1, chainerx.int64), (0x7fffffffffffffff, chainerx.int64),", "chainerx.int64, 1), (1, chainerx.uint8, 1), (1, chainerx.float32, 1.0), (1, chainerx.float64,", "(0x100000000, chainerx.uint8, 0), (0x7fffffffffffffff, chainerx.bool_, True), (0x7fffffffffffffff, chainerx.int8, -1), (0x7fffffffffffffff,", "chainerx.int32, 0), (0, chainerx.int64, 0), (0, chainerx.uint8, 0), (0, chainerx.float32,", "0x100000000), (0x100000000, chainerx.uint8, 0), (0x7fffffffffffffff, chainerx.bool_, True), (0x7fffffffffffffff, chainerx.int8, -1),", "[ -2, 1, -1.5, 2.3, True, False, float('inf'), float('nan')] @pytest.mark.parametrize('value,dtype',", "(1, chainerx.float32, 1.0), (1, chainerx.float64, 1.0), (1.0, chainerx.bool_, True), (1.0,", "chainerx.bool_, True), (0x7fffffffffffffff, chainerx.int8, -1), (0x7fffffffffffffff, chainerx.int16, -1), (0x7fffffffffffffff, chainerx.int32,", "def test_init_casted(value, cast_dtype, expected_value): scalar = chainerx.Scalar(value, cast_dtype) assert scalar.dtype", "(-1, chainerx.int64), (0x7fffffffffffffff, chainerx.int64), (-0x8000000000000000, chainerx.int64), (0.0, chainerx.float64), (float('inf'), chainerx.float64),", "= chainerx.Scalar(value) _check_cast_scalar_equals_data(scalar, value) _check_cast_scalar_equals_data(+scalar, +value) if isinstance(value, bool): with", "1), (1.0, chainerx.uint8, 1), (1.0, chainerx.float32, 1.0), (1.0, chainerx.float64, 1.0),", "assert scalar1 != value2 assert value2 != scalar1 @pytest.mark.parametrize('value', [", "== bool(data) assert int(scalar) == int(data) assert float(scalar) == float(data)", "0.0, 1, 1.0, -1, 0x100, 0x10000, 0x100000000, 0x7fffffffffffffff]) @chainerx.testing.parametrize_dtype_specifier('dtype_spec') def", "assert scalar2 == value2 assert value2 == scalar2 assert scalar2", "== dtype if math.isnan(value): assert math.isnan(scalar.tolist()) else: assert scalar.tolist() ==", "0x10000, 0x100000000, 0x7fffffffffffffff]) @chainerx.testing.parametrize_dtype_specifier('dtype_spec') def test_init_with_dtype(value, dtype_spec): expected_dtype = chainerx.dtype(dtype_spec)", "(0, chainerx.float64, 0.0), (0.0, chainerx.bool_, False), (0.0, chainerx.int8, 0), (0.0,", "scalar2 assert scalar2 != scalar1 assert scalar2 != value1 assert", "== expected_dtype assert scalar == chainerx.Scalar(value, expected_dtype) @pytest.mark.parametrize('value1,value2', [ #", "== expected_value assert isinstance(scalar.tolist(), type(expected_value)) @pytest.mark.parametrize( 'value', [0, 0.0, 1,", "(True, 1.0), # (False, 0), # (False, 0.0), # (float('inf'),", "isinstance(scalar.tolist(), type(value)) @pytest.mark.parametrize('value,cast_dtype,expected_value', [ (0, chainerx.bool_, False), (0, chainerx.int8, 0),", "assert scalar.tolist() == value assert isinstance(scalar.tolist(), type(value)) @pytest.mark.parametrize('value,cast_dtype,expected_value', [ (0,", "0), (0x100000000, chainerx.int16, 0), (0x100000000, chainerx.int32, 0), (0x100000000, chainerx.int64, 0x100000000),", "assert scalar.tolist() == expected_value assert isinstance(scalar.tolist(), type(expected_value)) @pytest.mark.parametrize( 'value', [0,", "0x100, 0x10000, 0x100000000, 0x7fffffffffffffff]) @chainerx.testing.parametrize_dtype_specifier('dtype_spec') def test_init_with_dtype(value, dtype_spec): expected_dtype =", "chainerx.int64), (0x7fffffffffffffff, chainerx.int64), (-0x8000000000000000, chainerx.int64), (0.0, chainerx.float64), 
(float('inf'), chainerx.float64), (float('nan'),", "chainerx.int16, 0), (0.0, chainerx.int32, 0), (0.0, chainerx.int64, 0), (0.0, chainerx.uint8,", "if math.isnan(value): assert math.isnan(scalar.tolist()) else: assert scalar.tolist() == value assert", "if isinstance(value, bool): assert scalar.dtype == chainerx.bool_ elif isinstance(value, int):", "(0x7fffffffffffffff, chainerx.int32, -1), (0x7fffffffffffffff, chainerx.int64, 0x7fffffffffffffff), (0x7fffffffffffffff, chainerx.uint8, 255), ])", "-1.5), (True, True), (False, False), # (True, 1), # (True,", "== repr(value) assert str(scalar) == str(value) def test_init_invalid(): with pytest.raises(TypeError):", "1.0002), (float('nan'), float('nan')), ]) def test_inequality(value1, value2): scalar1 = chainerx.Scalar(value1)", "0), (0, chainerx.int64, 0), (0, chainerx.uint8, 0), (0, chainerx.float32, 0.0),", "(0x7fffffffffffffff, chainerx.int8, -1), (0x7fffffffffffffff, chainerx.int16, -1), (0x7fffffffffffffff, chainerx.int32, -1), (0x7fffffffffffffff,", "scalar = chainerx.Scalar(value) assert repr(scalar) == repr(value) assert str(scalar) ==", "0x7fffffffffffffff]) @chainerx.testing.parametrize_dtype_specifier('dtype_spec') def test_init_with_dtype(value, dtype_spec): expected_dtype = chainerx.dtype(dtype_spec) scalar =", "(0x10000, chainerx.uint8, 0), (0x100000000, chainerx.bool_, True), (0x100000000, chainerx.int8, 0), (0x100000000,", "test_inequality(value1, value2): scalar1 = chainerx.Scalar(value1) scalar2 = chainerx.Scalar(value2) assert scalar1", "float(data) all_scalar_values = [ -2, 1, -1.5, 2.3, True, False,", "0x100), (0x100, chainerx.int32, 0x100), (0x100, chainerx.int64, 0x100), (0x100, chainerx.uint8, 0),", "chainerx.uint8, 0xff), (-1, chainerx.float32, -1.0), (-1, chainerx.float64, -1.0), (0x100, chainerx.bool_,", "assert scalar == chainerx.Scalar(value, expected_dtype) @pytest.mark.parametrize('value1,value2', [ # TODO(niboshi): Support", "assert False @pytest.mark.parametrize('value', all_scalar_values) def test_repr(value): scalar = chainerx.Scalar(value) assert", "chainerx.float32, 1.0), (1, chainerx.float64, 1.0), (1.0, chainerx.bool_, True), (1.0, chainerx.int8,", "chainerx.int8, 0), (0, chainerx.int16, 0), (0, chainerx.int32, 0), (0, chainerx.int64,", "math.isnan(scalar.tolist()) else: assert scalar.tolist() == expected_value assert isinstance(scalar.tolist(), type(expected_value)) @pytest.mark.parametrize(", "(0x100, chainerx.int64, 0x100), (0x100, chainerx.uint8, 0), (0x10000, chainerx.bool_, True), (0x10000,", "with pytest.raises(chainerx.DtypeError): -scalar # should not be able to negate", "chainerx.Scalar(value, dtype_spec) assert scalar.dtype == expected_dtype assert scalar == chainerx.Scalar(value,", "chainerx.int64, -1), (-1, chainerx.uint8, 0xff), (-1, chainerx.float32, -1.0), (-1, chainerx.float64,", "chainerx.int16, 0), (0, chainerx.int32, 0), (0, chainerx.int64, 0), (0, chainerx.uint8,", "chainerx.int64, 0x100), (0x100, chainerx.uint8, 0), (0x10000, chainerx.bool_, True), (0x10000, chainerx.int8,", "-1, 0x100, 0x10000, 0x100000000, 0x7fffffffffffffff]) @chainerx.testing.parametrize_dtype_specifier('dtype_spec') def test_init_with_dtype(value, dtype_spec): expected_dtype", "assert scalar2 != scalar1 assert scalar2 != value1 assert value1", "TODO(niboshi): Support commented-out cases (0, 0), (1, 1), # (1,", "(1.0, chainerx.int8, 1), (1.0, chainerx.int16, 1), (1.0, chainerx.int32, 1), (1.0,", "assert value2 != scalar1 @pytest.mark.parametrize('value', [ -2, 1, -1.5, 2.3,", "(1.0, chainerx.float32, 1.0), (1.0, 
chainerx.float64, 1.0), (-1, chainerx.bool_, True), (-1," ]
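
# The integer rows of the test_init_casted table above encode ordinary
# two's-complement wraparound: a value cast to an N-bit integer type keeps
# only its low N bits, and signed types map the upper half of that range to
# negative numbers. A minimal pure-Python sketch of that rule follows;
# wrap_cast is a hypothetical helper written here for illustration and is
# not part of the test file or of the chainerx API.
def wrap_cast(value, bits, signed):
    v = int(value) % (1 << bits)          # keep only the low `bits` bits
    if signed and v >= 1 << (bits - 1):   # re-centre the upper half for
        v -= 1 << bits                    # signed types (two's complement)
    return v


# Spot-checks against rows of the table above:
assert wrap_cast(-1, 8, signed=False) == 0xff                  # (-1, uint8, 0xff)
assert wrap_cast(0x100, 8, signed=True) == 0                   # (0x100, int8, 0)
assert wrap_cast(0x7fffffffffffffff, 16, signed=True) == -1    # (..., int16, -1)
assert wrap_cast(0x100000000, 64, signed=True) == 0x100000000  # (..., int64, ...)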
[ "text=\"PLAY AGAIN\") self.gameover_buttons.append(gameover_play_again_button) gameover_quit_button = Button(self, 50, HEIGHT - 100,", "\"play\": self.playing_update() if self.state == \"dead\": self.gameover_update() def draw(self): self.window.fill(BG_COL)", "[0, 1]: self.snake.direction = [0, -1] if event.key == pygame.K_DOWN", "self.active_buttons: if button.hovered: button.click() def gameover_update(self): for button in self.active_buttons:", "reset(self): # reset the game self.state = \"play\" self.active_buttons =", "== pygame.QUIT: self.running = False # checks if a key", "event.key == pygame.K_DOWN and self.snake.direction != [ 0, -1 ]:", "Button(self, (WIDTH // 2) - 50, 20, 100, 33, QUIT_BUTTON_COLOUR,", "= 5 def run(self): while self.running: self.events() self.update() self.draw() self.clock.tick(FPS[0])", "# checks if a key is pressed down if event.type", "hover_colour=(219, 53, 43), function=self.playing_quit, text=\"QUIT\") self.playing_buttons.append(playing_quit_button) # GAMEOVER BUTTON gameover_play_again_button", "GAMEOVER BUTTON gameover_play_again_button = Button(self, 50, 300, WIDTH - 100,", "= self.playing_buttons self.snake = Snake(self) FPS[0] = 5 def run(self):", "text, pos): text = self.gameover.render(text, False, RED) self.window.blit(text, (pos[0], pos[1]))", "self.intro_buttons.append(intro_play_button) intro_quit_button = Button(self, 50, HEIGHT - 100, WIDTH -", "bold=False, italic=True) self.font = pygame.font.SysFont(FONT, 20, bold=1) self.running = True", "self.clock.tick(FPS[0]) pygame.quit() sys.exit() def events(self): if self.state == \"intro\": self.intro_events()", "20]) def playing_quit(self): self.running = False # GAMEOVER FUNCTIONS def", "self.snake.direction = [1, 0] if event.key == pygame.K_UP and self.snake.direction", "function=self.reset, text=\"PLAY AGAIN\") self.gameover_buttons.append(gameover_play_again_button) gameover_quit_button = Button(self, 50, HEIGHT -", "if button.hovered: button.click() def intro_update(self): for button in self.active_buttons: button.update()", "italic=True) self.font = pygame.font.SysFont(FONT, 20, bold=1) self.running = True self.state", "pygame.K_ESCAPE: self.running = False if event.type == pygame.MOUSEBUTTONDOWN: for button", "button.click() def intro_update(self): for button in self.active_buttons: button.update() def intro_draw(self):", "== pygame.K_ESCAPE: self.running = False if event.key == pygame.K_LEFT and", "== \"intro\": self.intro_events() if self.state == \"play\": self.playing_events() if self.state", "100, 50, QUIT_BUTTON_COLOUR, hover_colour=(216, 53, 43), function=self.intro_quit, text=\"QUIT\") self.gameover_buttons.append(gameover_quit_button) def", "for button in self.active_buttons: if button.hovered: button.click() def playing_update(self): for", "QUIT BUTTON intro_play_button = Button(self, 50, 300, WIDTH - 100,", "# PlAY FUNCTIONS def playing_events(self): for event in pygame.event.get(): if", "button in self.active_buttons: if button.hovered: button.click() def intro_update(self): for button", "playing_quit_button = Button(self, (WIDTH // 2) - 50, 20, 100,", "function=self.intro_to_play, text=\"PLAY\") self.intro_buttons.append(intro_play_button) intro_quit_button = Button(self, 50, HEIGHT - 100,", "if self.state == \"play\": self.playing_update() if self.state == \"dead\": self.gameover_update()", "# PLAYING QUIT BUTTON playing_quit_button = Button(self, (WIDTH // 2)", "BUTTON intro_play_button = Button(self, 50, 300, WIDTH - 100, 50,", "== pygame.QUIT: self.running = False if 
event.type == pygame.KEYDOWN and", "QUIT_BUTTON_COLOUR, hover_colour=(216, 53, 43), function=self.intro_quit, text=\"QUIT\") self.gameover_buttons.append(gameover_quit_button) def show_text(self, text,", "self.intro_draw() if self.state == \"play\": self.playing_draw() if self.state == \"dead\":", "if self.state == \"play\": self.playing_draw() if self.state == \"dead\": self.gameover_draw()", "1, 0 ]: self.snake.direction = [-1, 0] if event.key ==", "= Snake(self) FPS[0] = 5 def run(self): while self.running: self.events()", "Button(self, 50, 300, WIDTH - 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(36, 183,", "import WIDTH, HEIGHT, FONT, BG_COL, QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR, BLACK, FPS, RED", "-1, 0 ]: self.snake.direction = [1, 0] if event.key ==", "100, 50, QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43), function=self.intro_quit, text=\"QUIT\") self.intro_buttons.append(intro_quit_button) #", "pygame.K_LEFT and self.snake.direction != [ 1, 0 ]: self.snake.direction =", "button.update() def intro_draw(self): for button in self.active_buttons: button.draw() def intro_to_play(self):", "pygame from app_window import App_window from button import Button from", "event.type == pygame.QUIT: self.running = False if event.type == pygame.KEYDOWN", "False # PlAY FUNCTIONS def playing_events(self): for event in pygame.event.get():", "if event.type == pygame.QUIT: self.running = False if event.type ==", "intro_events(self): for event in pygame.event.get(): if event.type == pygame.QUIT: self.running", "= False if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:", "event.key == pygame.K_LEFT and self.snake.direction != [ 1, 0 ]:", "46), function=self.intro_to_play, text=\"PLAY\") self.intro_buttons.append(intro_play_button) intro_quit_button = Button(self, 50, HEIGHT -", "self.snake.direction != [ 0, -1 ]: self.snake.direction = [0, 1]", "= [0, -1] if event.key == pygame.K_DOWN and self.snake.direction !=", "(WIDTH // 2) - 50, 20, 100, 33, QUIT_BUTTON_COLOUR, hover_colour=(219,", "Food from settings import WIDTH, HEIGHT, FONT, BG_COL, QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR,", "- 100, WIDTH - 100, 50, QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43),", "def playing_update(self): for button in self.active_buttons: button.update() self.app_window.update() def playing_draw(self):", "if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE: self.running =", "= \"intro\" self.intro_buttons = [] self.playing_buttons = [] self.gameover_buttons =", "20, 100, 33, QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43), function=self.playing_quit, text=\"QUIT\") self.playing_buttons.append(playing_quit_button)", "FPS, RED class App: def __init__(self): pygame.init() self.clock = pygame.time.Clock()", "event.key == pygame.K_ESCAPE: self.running = False if event.type == pygame.MOUSEBUTTONDOWN:", "self.playing_buttons = [] self.gameover_buttons = [] self.active_buttons = self.intro_buttons self.app_window", "import Button from snake import Snake from food import Food", "pygame.K_RIGHT and self.snake.direction != [ -1, 0 ]: self.snake.direction =", "pygame.font.SysFont(FONT, 20, bold=1) self.running = True self.state = \"intro\" self.intro_buttons", "PLAYING QUIT BUTTON playing_quit_button = Button(self, (WIDTH // 2) -", "33, QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43), function=self.playing_quit, text=\"QUIT\") self.playing_buttons.append(playing_quit_button) # GAMEOVER", "button in self.active_buttons: button.update() def gameover_draw(self): for button in self.active_buttons:", 
"self.active_buttons = self.playing_buttons self.snake = Snake(self) FPS[0] = 5 def", "text, pos): text = self.font.render(text, False, BLACK) self.window.blit(text, (pos[0], pos[1]))", "button in self.active_buttons: button.draw() self.game_over(\"GAME OVER\", [WIDTH - 440, 30])", "0] if event.key == pygame.K_UP and self.snake.direction != [0, 1]:", "update(self): if self.state == \"intro\": self.intro_update() if self.state == \"play\":", "if button.hovered: button.click() def gameover_update(self): for button in self.active_buttons: button.update()", "HEIGHT - 100, WIDTH - 100, 50, QUIT_BUTTON_COLOUR, hover_colour=(216, 53,", "if event.key == pygame.K_ESCAPE: self.running = False if event.key ==", "!= [0, 1]: self.snake.direction = [0, -1] if event.key ==", "== \"play\": self.playing_update() if self.state == \"dead\": self.gameover_update() def draw(self):", "self.active_buttons: button.update() def intro_draw(self): for button in self.active_buttons: button.draw() def", "30]) def game_over(self, text, pos): text = self.gameover.render(text, False, RED)", "50, HEIGHT - 100, WIDTH - 100, 50, QUIT_BUTTON_COLOUR, hover_colour=(219,", "False if event.type == pygame.MOUSEBUTTONDOWN: for button in self.active_buttons: if", "\"play\" self.active_buttons = self.playing_buttons def intro_quit(self): self.running = False #", "for event in pygame.event.get(): if event.type == pygame.QUIT: self.running =", "self.snake = Snake(self) self.food = Food(self) self.make_buttons() def make_buttons(self): #", "[0, 1] if event.type == pygame.MOUSEBUTTONDOWN: for button in self.active_buttons:", "pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: self.running = False if event.key", "PLAY AND QUIT BUTTON intro_play_button = Button(self, 50, 300, WIDTH", "self.running = True self.state = \"intro\" self.intro_buttons = [] self.playing_buttons", "= pygame.font.SysFont(\"Comicsansms\", 90, bold=False, italic=True) self.font = pygame.font.SysFont(FONT, 20, bold=1)", "intro_quit(self): self.running = False # PlAY FUNCTIONS def playing_events(self): for", "food import Food from settings import WIDTH, HEIGHT, FONT, BG_COL,", "self.running = False if event.type == pygame.KEYDOWN and event.key ==", "event.key == pygame.K_UP and self.snake.direction != [0, 1]: self.snake.direction =", "def events(self): if self.state == \"intro\": self.intro_events() if self.state ==", "]: self.snake.direction = [-1, 0] if event.key == pygame.K_RIGHT and", "= Button(self, 50, 300, WIDTH - 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(49,", "snake import Snake from food import Food from settings import", "self.window.fill(BG_COL) if self.state == \"intro\": self.intro_draw() if self.state == \"play\":", "button import Button from snake import Snake from food import", "== pygame.KEYDOWN and event.key == pygame.K_ESCAPE: self.running = False if", "def run(self): while self.running: self.events() self.update() self.draw() self.clock.tick(FPS[0]) pygame.quit() sys.exit()", "button in self.active_buttons: if button.hovered: button.click() def playing_update(self): for button", "self.active_buttons: button.update() self.app_window.update() def playing_draw(self): self.app_window.draw() for button in self.active_buttons:", "and self.snake.direction != [0, 1]: self.snake.direction = [0, -1] if", "pygame.event.get(): if event.type == pygame.QUIT: self.running = False if event.type", "self.gameover = pygame.font.SysFont(\"Comicsansms\", 90, bold=False, italic=True) self.font = pygame.font.SysFont(FONT, 20,", "for button in self.active_buttons: 
button.draw() self.game_over(\"GAME OVER\", [WIDTH - 440,", "button.draw() self.game_over(\"GAME OVER\", [WIDTH - 440, 30]) def game_over(self, text,", "pygame.MOUSEBUTTONDOWN: for button in self.active_buttons: if button.hovered: button.click() def gameover_update(self):", "button.update() def gameover_draw(self): for button in self.active_buttons: button.draw() self.game_over(\"GAME OVER\",", "def playing_quit(self): self.running = False # GAMEOVER FUNCTIONS def gameover_events(self):", "300, WIDTH - 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(36, 183, 23), function=self.reset,", "self.playing_buttons.append(playing_quit_button) # GAMEOVER BUTTON gameover_play_again_button = Button(self, 50, 300, WIDTH", "50, QUIT_BUTTON_COLOUR, hover_colour=(216, 53, 43), function=self.intro_quit, text=\"QUIT\") self.gameover_buttons.append(gameover_quit_button) def show_text(self,", "PLAY_BUTTON_COLOUR, BLACK, FPS, RED class App: def __init__(self): pygame.init() self.clock", "50, 300, WIDTH - 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(36, 183, 23),", "= Food(self) self.make_buttons() def make_buttons(self): # INTRO PLAY AND QUIT", "from app_window import App_window from button import Button from snake", "0] if event.key == pygame.K_RIGHT and self.snake.direction != [ -1,", "pygame.display.set_mode((WIDTH, HEIGHT)) self.gameover = pygame.font.SysFont(\"Comicsansms\", 90, bold=False, italic=True) self.font =", "def intro_quit(self): self.running = False # PlAY FUNCTIONS def playing_events(self):", "self.app_window = App_window(self) self.snake = Snake(self) self.food = Food(self) self.make_buttons()", "FUNCTIONS def playing_events(self): for event in pygame.event.get(): if event.type ==", "== \"play\": self.playing_draw() if self.state == \"dead\": self.gameover_draw() pygame.display.update() #", "= pygame.font.SysFont(FONT, 20, bold=1) self.running = True self.state = \"intro\"", "- 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(49, 218, 46), function=self.intro_to_play, text=\"PLAY\") self.intro_buttons.append(intro_play_button)", "self.state == \"intro\": self.intro_draw() if self.state == \"play\": self.playing_draw() if", "and event.key == pygame.K_ESCAPE: self.running = False if event.type ==", "183, 23), function=self.reset, text=\"PLAY AGAIN\") self.gameover_buttons.append(gameover_play_again_button) gameover_quit_button = Button(self, 50,", "self.gameover_events() def update(self): if self.state == \"intro\": self.intro_update() if self.state", "button in self.active_buttons: if button.hovered: button.click() def gameover_update(self): for button", "def game_over(self, text, pos): text = self.gameover.render(text, False, RED) self.window.blit(text,", "button.hovered: button.click() def gameover_update(self): for button in self.active_buttons: button.update() def", "= [] self.active_buttons = self.intro_buttons self.app_window = App_window(self) self.snake =", "BG_COL, QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR, BLACK, FPS, RED class App: def __init__(self):", "App_window(self) self.snake = Snake(self) self.food = Food(self) self.make_buttons() def make_buttons(self):", "self.window.blit(text, (pos[0], pos[1])) def reset(self): # reset the game self.state", "button.draw() def intro_to_play(self): self.state = \"play\" self.active_buttons = self.playing_buttons def", "- 50, 20, 100, 33, QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43), function=self.playing_quit,", "self.playing_buttons self.snake = Snake(self) FPS[0] = 5 def run(self): while", "self.make_buttons() def make_buttons(self): # INTRO PLAY AND 
QUIT BUTTON intro_play_button", "def playing_draw(self): self.app_window.draw() for button in self.active_buttons: button.draw() self.show_text(\"Score: \"", "the game self.state = \"play\" self.active_buttons = self.playing_buttons self.snake =", "sys import pygame from app_window import App_window from button import", "pygame.K_ESCAPE: self.running = False if event.key == pygame.K_LEFT and self.snake.direction", "self.active_buttons = self.intro_buttons self.app_window = App_window(self) self.snake = Snake(self) self.food", "from food import Food from settings import WIDTH, HEIGHT, FONT,", "button.hovered: button.click() def playing_update(self): for button in self.active_buttons: button.update() self.app_window.update()", "False # GAMEOVER FUNCTIONS def gameover_events(self): for event in pygame.event.get():", "event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: self.running = False", "self.state == \"play\": self.playing_events() if self.state == \"dead\": self.gameover_events() def", "WIDTH - 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(49, 218, 46), function=self.intro_to_play, text=\"PLAY\")", "= self.font.render(text, False, BLACK) self.window.blit(text, (pos[0], pos[1])) def reset(self): #", "if event.key == pygame.K_DOWN and self.snake.direction != [ 0, -1", "100, WIDTH - 100, 50, QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43), function=self.intro_quit,", "= [] self.gameover_buttons = [] self.active_buttons = self.intro_buttons self.app_window =", "50, 300, WIDTH - 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(49, 218, 46),", "self.state == \"play\": self.playing_update() if self.state == \"dead\": self.gameover_update() def", "def draw(self): self.window.fill(BG_COL) if self.state == \"intro\": self.intro_draw() if self.state", "if self.state == \"dead\": self.gameover_draw() pygame.display.update() # INTRO FUNCTIONS def", "self.state == \"intro\": self.intro_update() if self.state == \"play\": self.playing_update() if", "False if event.key == pygame.K_LEFT and self.snake.direction != [ 1,", "self.gameover_buttons.append(gameover_play_again_button) gameover_quit_button = Button(self, 50, HEIGHT - 100, WIDTH -", "1] if event.type == pygame.MOUSEBUTTONDOWN: for button in self.active_buttons: if", "\"dead\": self.gameover_events() def update(self): if self.state == \"intro\": self.intro_update() if", "button in self.active_buttons: button.draw() def intro_to_play(self): self.state = \"play\" self.active_buttons", "INTRO PLAY AND QUIT BUTTON intro_play_button = Button(self, 50, 300,", "- 440, 30]) def game_over(self, text, pos): text = self.gameover.render(text,", "self.running = False # GAMEOVER FUNCTIONS def gameover_events(self): for event", "import Food from settings import WIDTH, HEIGHT, FONT, BG_COL, QUIT_BUTTON_COLOUR,", "QUIT BUTTON playing_quit_button = Button(self, (WIDTH // 2) - 50,", "in self.active_buttons: button.draw() self.show_text(\"Score: \" + str(self.snake.length - 1), [20,", "pressed down if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE:", "HEIGHT, FONT, BG_COL, QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR, BLACK, FPS, RED class App:", "while self.running: self.events() self.update() self.draw() self.clock.tick(FPS[0]) pygame.quit() sys.exit() def events(self):", "90, bold=False, italic=True) self.font = pygame.font.SysFont(FONT, 20, bold=1) self.running =", "self.state == \"intro\": self.intro_events() if self.state == \"play\": self.playing_events() if", "= \"play\" self.active_buttons = self.playing_buttons self.snake = Snake(self) FPS[0] =", 
"pos[1])) def reset(self): # reset the game self.state = \"play\"", "self.active_buttons: if button.hovered: button.click() def intro_update(self): for button in self.active_buttons:", "!= [ -1, 0 ]: self.snake.direction = [1, 0] if", "INTRO FUNCTIONS def intro_events(self): for event in pygame.event.get(): if event.type", "event in pygame.event.get(): if event.type == pygame.QUIT: self.running = False", "def gameover_update(self): for button in self.active_buttons: button.update() def gameover_draw(self): for", "self.gameover_update() def draw(self): self.window.fill(BG_COL) if self.state == \"intro\": self.intro_draw() if", "QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43), function=self.intro_quit, text=\"QUIT\") self.intro_buttons.append(intro_quit_button) # PLAYING QUIT", "pygame.MOUSEBUTTONDOWN: for button in self.active_buttons: if button.hovered: button.click() def intro_update(self):", "pygame.event.get(): if event.type == pygame.QUIT: self.running = False # checks", "in pygame.event.get(): if event.type == pygame.QUIT: self.running = False #", "text=\"QUIT\") self.intro_buttons.append(intro_quit_button) # PLAYING QUIT BUTTON playing_quit_button = Button(self, (WIDTH", "== \"dead\": self.gameover_draw() pygame.display.update() # INTRO FUNCTIONS def intro_events(self): for", "button.click() def playing_update(self): for button in self.active_buttons: button.update() self.app_window.update() def", "= pygame.display.set_mode((WIDTH, HEIGHT)) self.gameover = pygame.font.SysFont(\"Comicsansms\", 90, bold=False, italic=True) self.font", "if self.state == \"dead\": self.gameover_update() def draw(self): self.window.fill(BG_COL) if self.state", "and self.snake.direction != [ 0, -1 ]: self.snake.direction = [0,", "== pygame.MOUSEBUTTONDOWN: for button in self.active_buttons: if button.hovered: button.click() def", "for button in self.active_buttons: button.update() self.app_window.update() def playing_draw(self): self.app_window.draw() for", "def show_text(self, text, pos): text = self.font.render(text, False, BLACK) self.window.blit(text,", "def make_buttons(self): # INTRO PLAY AND QUIT BUTTON intro_play_button =", "self.state == \"dead\": self.gameover_draw() pygame.display.update() # INTRO FUNCTIONS def intro_events(self):", "== \"dead\": self.gameover_update() def draw(self): self.window.fill(BG_COL) if self.state == \"intro\":", "in self.active_buttons: if button.hovered: button.click() def playing_update(self): for button in", "-1 ]: self.snake.direction = [0, 1] if event.type == pygame.MOUSEBUTTONDOWN:", "43), function=self.playing_quit, text=\"QUIT\") self.playing_buttons.append(playing_quit_button) # GAMEOVER BUTTON gameover_play_again_button = Button(self,", "gameover_update(self): for button in self.active_buttons: button.update() def gameover_draw(self): for button", "self.running: self.events() self.update() self.draw() self.clock.tick(FPS[0]) pygame.quit() sys.exit() def events(self): if", "in self.active_buttons: button.draw() self.game_over(\"GAME OVER\", [WIDTH - 440, 30]) def", "[20, 20]) def playing_quit(self): self.running = False # GAMEOVER FUNCTIONS", "1), [20, 20]) def playing_quit(self): self.running = False # GAMEOVER", "App: def __init__(self): pygame.init() self.clock = pygame.time.Clock() self.window = pygame.display.set_mode((WIDTH,", "== \"intro\": self.intro_draw() if self.state == \"play\": self.playing_draw() if self.state", "pygame.font.SysFont(\"Comicsansms\", 90, bold=False, italic=True) self.font = pygame.font.SysFont(FONT, 20, bold=1) self.running", "== 
pygame.K_RIGHT and self.snake.direction != [ -1, 0 ]: self.snake.direction", "import pygame from app_window import App_window from button import Button", "and self.snake.direction != [ -1, 0 ]: self.snake.direction = [1,", "-1] if event.key == pygame.K_DOWN and self.snake.direction != [ 0,", "for button in self.active_buttons: if button.hovered: button.click() def gameover_update(self): for", "intro_to_play(self): self.state = \"play\" self.active_buttons = self.playing_buttons def intro_quit(self): self.running", "if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: self.running =", "BLACK) self.window.blit(text, (pos[0], pos[1])) def reset(self): # reset the game", "from snake import Snake from food import Food from settings", "in pygame.event.get(): if event.type == pygame.QUIT: self.running = False if", "FPS[0] = 5 def run(self): while self.running: self.events() self.update() self.draw()", "def update(self): if self.state == \"intro\": self.intro_update() if self.state ==", "self.food = Food(self) self.make_buttons() def make_buttons(self): # INTRO PLAY AND", "self.game_over(\"GAME OVER\", [WIDTH - 440, 30]) def game_over(self, text, pos):", "True self.state = \"intro\" self.intro_buttons = [] self.playing_buttons = []", "0 ]: self.snake.direction = [-1, 0] if event.key == pygame.K_RIGHT", "text=\"QUIT\") self.playing_buttons.append(playing_quit_button) # GAMEOVER BUTTON gameover_play_again_button = Button(self, 50, 300,", "= False # checks if a key is pressed down", "import App_window from button import Button from snake import Snake", "self.state == \"play\": self.playing_draw() if self.state == \"dead\": self.gameover_draw() pygame.display.update()", "PLAY_BUTTON_COLOUR, hover_colour=(49, 218, 46), function=self.intro_to_play, text=\"PLAY\") self.intro_buttons.append(intro_play_button) intro_quit_button = Button(self,", "show_text(self, text, pos): text = self.font.render(text, False, BLACK) self.window.blit(text, (pos[0],", "self.gameover_draw() pygame.display.update() # INTRO FUNCTIONS def intro_events(self): for event in", "event.key == pygame.K_ESCAPE: self.running = False if event.key == pygame.K_LEFT", "pygame.time.Clock() self.window = pygame.display.set_mode((WIDTH, HEIGHT)) self.gameover = pygame.font.SysFont(\"Comicsansms\", 90, bold=False,", "self.draw() self.clock.tick(FPS[0]) pygame.quit() sys.exit() def events(self): if self.state == \"intro\":", "if self.state == \"intro\": self.intro_draw() if self.state == \"play\": self.playing_draw()", "self.snake.direction != [ 1, 0 ]: self.snake.direction = [-1, 0]", "in self.active_buttons: if button.hovered: button.click() def gameover_update(self): for button in", "class App: def __init__(self): pygame.init() self.clock = pygame.time.Clock() self.window =", "bold=1) self.running = True self.state = \"intro\" self.intro_buttons = []", "False if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE: self.running", "\"intro\": self.intro_events() if self.state == \"play\": self.playing_events() if self.state ==", "+ str(self.snake.length - 1), [20, 20]) def playing_quit(self): self.running =", "def intro_events(self): for event in pygame.event.get(): if event.type == pygame.QUIT:", "self.playing_draw() if self.state == \"dead\": self.gameover_draw() pygame.display.update() # INTRO FUNCTIONS", "event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE: self.running = False", "]: self.snake.direction = [1, 0] if event.key == pygame.K_UP and", "\"play\" self.active_buttons = self.playing_buttons self.snake 
= Snake(self) FPS[0] = 5", "0, -1 ]: self.snake.direction = [0, 1] if event.type ==", "# GAMEOVER BUTTON gameover_play_again_button = Button(self, 50, 300, WIDTH -", "\"intro\": self.intro_draw() if self.state == \"play\": self.playing_draw() if self.state ==", "== pygame.K_ESCAPE: self.running = False if event.type == pygame.MOUSEBUTTONDOWN: for", "self.gameover_buttons.append(gameover_quit_button) def show_text(self, text, pos): text = self.font.render(text, False, BLACK)", "= [0, 1] if event.type == pygame.MOUSEBUTTONDOWN: for button in", "WIDTH - 100, 50, QUIT_BUTTON_COLOUR, hover_colour=(216, 53, 43), function=self.intro_quit, text=\"QUIT\")", "\" + str(self.snake.length - 1), [20, 20]) def playing_quit(self): self.running", "str(self.snake.length - 1), [20, 20]) def playing_quit(self): self.running = False", "20, bold=1) self.running = True self.state = \"intro\" self.intro_buttons =", "- 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(36, 183, 23), function=self.reset, text=\"PLAY AGAIN\")", "button in self.active_buttons: button.draw() self.show_text(\"Score: \" + str(self.snake.length - 1),", "playing_events(self): for event in pygame.event.get(): if event.type == pygame.QUIT: self.running", "gameover_quit_button = Button(self, 50, HEIGHT - 100, WIDTH - 100,", "def intro_to_play(self): self.state = \"play\" self.active_buttons = self.playing_buttons def intro_quit(self):", "function=self.intro_quit, text=\"QUIT\") self.intro_buttons.append(intro_quit_button) # PLAYING QUIT BUTTON playing_quit_button = Button(self,", "app_window import App_window from button import Button from snake import", "function=self.intro_quit, text=\"QUIT\") self.gameover_buttons.append(gameover_quit_button) def show_text(self, text, pos): text = self.font.render(text,", "self.intro_buttons = [] self.playing_buttons = [] self.gameover_buttons = [] self.active_buttons", "440, 30]) def game_over(self, text, pos): text = self.gameover.render(text, False,", "text=\"PLAY\") self.intro_buttons.append(intro_play_button) intro_quit_button = Button(self, 50, HEIGHT - 100, WIDTH", "self.playing_events() if self.state == \"dead\": self.gameover_events() def update(self): if self.state", "# INTRO PLAY AND QUIT BUTTON intro_play_button = Button(self, 50,", "self.font.render(text, False, BLACK) self.window.blit(text, (pos[0], pos[1])) def reset(self): # reset", "WIDTH - 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(36, 183, 23), function=self.reset, text=\"PLAY", "def intro_update(self): for button in self.active_buttons: button.update() def intro_draw(self): for", "import sys import pygame from app_window import App_window from button", "\"dead\": self.gameover_draw() pygame.display.update() # INTRO FUNCTIONS def intro_events(self): for event", "100, 50, PLAY_BUTTON_COLOUR, hover_colour=(49, 218, 46), function=self.intro_to_play, text=\"PLAY\") self.intro_buttons.append(intro_play_button) intro_quit_button", "button.click() def gameover_update(self): for button in self.active_buttons: button.update() def gameover_draw(self):", "for button in self.active_buttons: button.draw() def intro_to_play(self): self.state = \"play\"", "# INTRO FUNCTIONS def intro_events(self): for event in pygame.event.get(): if", "WIDTH - 100, 50, QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43), function=self.intro_quit, text=\"QUIT\")", "playing_quit(self): self.running = False # GAMEOVER FUNCTIONS def gameover_events(self): for", "OVER\", [WIDTH - 440, 30]) def game_over(self, text, pos): text", "for button in self.active_buttons: if button.hovered: 
button.click() def intro_update(self): for", "intro_play_button = Button(self, 50, 300, WIDTH - 100, 50, PLAY_BUTTON_COLOUR,", "2) - 50, 20, 100, 33, QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43),", "GAMEOVER FUNCTIONS def gameover_events(self): for event in pygame.event.get(): if event.type", "= pygame.time.Clock() self.window = pygame.display.set_mode((WIDTH, HEIGHT)) self.gameover = pygame.font.SysFont(\"Comicsansms\", 90,", "(pos[0], pos[1])) def reset(self): # reset the game self.state =", "Button(self, 50, HEIGHT - 100, WIDTH - 100, 50, QUIT_BUTTON_COLOUR,", "\"play\": self.playing_events() if self.state == \"dead\": self.gameover_events() def update(self): if", "self.running = False if event.type == pygame.MOUSEBUTTONDOWN: for button in", "game_over(self, text, pos): text = self.gameover.render(text, False, RED) self.window.blit(text, (pos[0],", "text = self.font.render(text, False, BLACK) self.window.blit(text, (pos[0], pos[1])) def reset(self):", "in self.active_buttons: button.update() def intro_draw(self): for button in self.active_buttons: button.draw()", "self.snake.direction = [0, 1] if event.type == pygame.MOUSEBUTTONDOWN: for button", "pygame.K_UP and self.snake.direction != [0, 1]: self.snake.direction = [0, -1]", "intro_quit_button = Button(self, 50, HEIGHT - 100, WIDTH - 100,", "self.active_buttons: button.draw() def intro_to_play(self): self.state = \"play\" self.active_buttons = self.playing_buttons", "draw(self): self.window.fill(BG_COL) if self.state == \"intro\": self.intro_draw() if self.state ==", "hover_colour=(219, 53, 43), function=self.intro_quit, text=\"QUIT\") self.intro_buttons.append(intro_quit_button) # PLAYING QUIT BUTTON", "= False if event.type == pygame.MOUSEBUTTONDOWN: for button in self.active_buttons:", "in self.active_buttons: button.update() def gameover_draw(self): for button in self.active_buttons: button.draw()", "]: self.snake.direction = [0, 1] if event.type == pygame.MOUSEBUTTONDOWN: for", "self.app_window.draw() for button in self.active_buttons: button.draw() self.show_text(\"Score: \" + str(self.snake.length", "BLACK, FPS, RED class App: def __init__(self): pygame.init() self.clock =", "playing_draw(self): self.app_window.draw() for button in self.active_buttons: button.draw() self.show_text(\"Score: \" +", "self.intro_buttons self.app_window = App_window(self) self.snake = Snake(self) self.food = Food(self)", "100, WIDTH - 100, 50, QUIT_BUTTON_COLOUR, hover_colour=(216, 53, 43), function=self.intro_quit,", "218, 46), function=self.intro_to_play, text=\"PLAY\") self.intro_buttons.append(intro_play_button) intro_quit_button = Button(self, 50, HEIGHT", "53, 43), function=self.playing_quit, text=\"QUIT\") self.playing_buttons.append(playing_quit_button) # GAMEOVER BUTTON gameover_play_again_button =", "[-1, 0] if event.key == pygame.K_RIGHT and self.snake.direction != [", "Food(self) self.make_buttons() def make_buttons(self): # INTRO PLAY AND QUIT BUTTON", "button.hovered: button.click() def intro_update(self): for button in self.active_buttons: button.update() def", "0 ]: self.snake.direction = [1, 0] if event.key == pygame.K_UP", "reset the game self.state = \"play\" self.active_buttons = self.playing_buttons self.snake", "== pygame.K_DOWN and self.snake.direction != [ 0, -1 ]: self.snake.direction", "= False # PlAY FUNCTIONS def playing_events(self): for event in", "in self.active_buttons: button.update() self.app_window.update() def playing_draw(self): self.app_window.draw() for button in", "\"dead\": self.gameover_update() def 
draw(self): self.window.fill(BG_COL) if self.state == \"intro\": self.intro_draw()", "self.snake.direction != [ -1, 0 ]: self.snake.direction = [1, 0]", "def intro_draw(self): for button in self.active_buttons: button.draw() def intro_to_play(self): self.state", "== pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: self.running = False if", "if self.state == \"play\": self.playing_events() if self.state == \"dead\": self.gameover_events()", "100, 33, QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43), function=self.playing_quit, text=\"QUIT\") self.playing_buttons.append(playing_quit_button) #", "self.active_buttons: button.draw() self.show_text(\"Score: \" + str(self.snake.length - 1), [20, 20])", "a key is pressed down if event.type == pygame.KEYDOWN: if", "\"play\": self.playing_draw() if self.state == \"dead\": self.gameover_draw() pygame.display.update() # INTRO", "if event.type == pygame.QUIT: self.running = False # checks if", "[] self.playing_buttons = [] self.gameover_buttons = [] self.active_buttons = self.intro_buttons", "!= [ 0, -1 ]: self.snake.direction = [0, 1] if", "[1, 0] if event.key == pygame.K_UP and self.snake.direction != [0,", "if event.key == pygame.K_RIGHT and self.snake.direction != [ -1, 0", "23), function=self.reset, text=\"PLAY AGAIN\") self.gameover_buttons.append(gameover_play_again_button) gameover_quit_button = Button(self, 50, HEIGHT", "button in self.active_buttons: button.update() def intro_draw(self): for button in self.active_buttons:", "def gameover_draw(self): for button in self.active_buttons: button.draw() self.game_over(\"GAME OVER\", [WIDTH", "self.running = False # PlAY FUNCTIONS def playing_events(self): for event", "button in self.active_buttons: button.update() self.app_window.update() def playing_draw(self): self.app_window.draw() for button", "self.active_buttons: button.draw() self.game_over(\"GAME OVER\", [WIDTH - 440, 30]) def game_over(self,", "50, QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43), function=self.intro_quit, text=\"QUIT\") self.intro_buttons.append(intro_quit_button) # PLAYING", "gameover_play_again_button = Button(self, 50, 300, WIDTH - 100, 50, PLAY_BUTTON_COLOUR,", "# reset the game self.state = \"play\" self.active_buttons = self.playing_buttons", "self.intro_update() if self.state == \"play\": self.playing_update() if self.state == \"dead\":", "game self.state = \"play\" self.active_buttons = self.playing_buttons self.snake = Snake(self)", "== \"intro\": self.intro_update() if self.state == \"play\": self.playing_update() if self.state", "from button import Button from snake import Snake from food", "self.state = \"play\" self.active_buttons = self.playing_buttons self.snake = Snake(self) FPS[0]", "self.state == \"dead\": self.gameover_events() def update(self): if self.state == \"intro\":", "for button in self.active_buttons: button.update() def gameover_draw(self): for button in", "50, PLAY_BUTTON_COLOUR, hover_colour=(49, 218, 46), function=self.intro_to_play, text=\"PLAY\") self.intro_buttons.append(intro_play_button) intro_quit_button =", "self.active_buttons: if button.hovered: button.click() def playing_update(self): for button in self.active_buttons:", "is pressed down if event.type == pygame.KEYDOWN: if event.key ==", "self.intro_buttons.append(intro_quit_button) # PLAYING QUIT BUTTON playing_quit_button = Button(self, (WIDTH //", "__init__(self): pygame.init() self.clock = pygame.time.Clock() self.window = pygame.display.set_mode((WIDTH, HEIGHT)) self.gameover", "[] self.active_buttons = self.intro_buttons 
self.app_window = App_window(self) self.snake = Snake(self)", "self.window = pygame.display.set_mode((WIDTH, HEIGHT)) self.gameover = pygame.font.SysFont(\"Comicsansms\", 90, bold=False, italic=True)", "50, PLAY_BUTTON_COLOUR, hover_colour=(36, 183, 23), function=self.reset, text=\"PLAY AGAIN\") self.gameover_buttons.append(gameover_play_again_button) gameover_quit_button", "hover_colour=(36, 183, 23), function=self.reset, text=\"PLAY AGAIN\") self.gameover_buttons.append(gameover_play_again_button) gameover_quit_button = Button(self,", "- 100, WIDTH - 100, 50, QUIT_BUTTON_COLOUR, hover_colour=(216, 53, 43),", "if self.state == \"intro\": self.intro_update() if self.state == \"play\": self.playing_update()", "PLAY_BUTTON_COLOUR, hover_colour=(36, 183, 23), function=self.reset, text=\"PLAY AGAIN\") self.gameover_buttons.append(gameover_play_again_button) gameover_quit_button =", "100, 50, PLAY_BUTTON_COLOUR, hover_colour=(36, 183, 23), function=self.reset, text=\"PLAY AGAIN\") self.gameover_buttons.append(gameover_play_again_button)", "self.active_buttons: button.update() def gameover_draw(self): for button in self.active_buttons: button.draw() self.game_over(\"GAME", "pygame.MOUSEBUTTONDOWN: for button in self.active_buttons: if button.hovered: button.click() def playing_update(self):", "hover_colour=(49, 218, 46), function=self.intro_to_play, text=\"PLAY\") self.intro_buttons.append(intro_play_button) intro_quit_button = Button(self, 50,", "self.running = False if event.key == pygame.K_LEFT and self.snake.direction !=", "if event.key == pygame.K_UP and self.snake.direction != [0, 1]: self.snake.direction", "if event.key == pygame.K_LEFT and self.snake.direction != [ 1, 0", "pygame.K_DOWN and self.snake.direction != [ 0, -1 ]: self.snake.direction =", "= False # GAMEOVER FUNCTIONS def gameover_events(self): for event in", "43), function=self.intro_quit, text=\"QUIT\") self.gameover_buttons.append(gameover_quit_button) def show_text(self, text, pos): text =", "hover_colour=(216, 53, 43), function=self.intro_quit, text=\"QUIT\") self.gameover_buttons.append(gameover_quit_button) def show_text(self, text, pos):", "Snake from food import Food from settings import WIDTH, HEIGHT,", "pygame.QUIT: self.running = False if event.type == pygame.KEYDOWN and event.key", "gameover_events(self): for event in pygame.event.get(): if event.type == pygame.QUIT: self.running", "event.type == pygame.QUIT: self.running = False # checks if a", "= [-1, 0] if event.key == pygame.K_RIGHT and self.snake.direction !=", "pygame.init() self.clock = pygame.time.Clock() self.window = pygame.display.set_mode((WIDTH, HEIGHT)) self.gameover =", "self.active_buttons = self.playing_buttons def intro_quit(self): self.running = False # PlAY", "key is pressed down if event.type == pygame.KEYDOWN: if event.key", "settings import WIDTH, HEIGHT, FONT, BG_COL, QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR, BLACK, FPS,", "== \"play\": self.playing_events() if self.state == \"dead\": self.gameover_events() def update(self):", "playing_update(self): for button in self.active_buttons: button.update() self.app_window.update() def playing_draw(self): self.app_window.draw()", "self.snake = Snake(self) FPS[0] = 5 def run(self): while self.running:", "self.intro_events() if self.state == \"play\": self.playing_events() if self.state == \"dead\":", "= Button(self, (WIDTH // 2) - 50, 20, 100, 33,", "== \"dead\": self.gameover_events() def update(self): if self.state == \"intro\": self.intro_update()", "self.snake.direction = [0, -1] if event.key == 
pygame.K_DOWN and self.snake.direction", "self.show_text(\"Score: \" + str(self.snake.length - 1), [20, 20]) def playing_quit(self):", "50, 20, 100, 33, QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43), function=self.playing_quit, text=\"QUIT\")", "self.font = pygame.font.SysFont(FONT, 20, bold=1) self.running = True self.state =", "HEIGHT - 100, WIDTH - 100, 50, QUIT_BUTTON_COLOUR, hover_colour=(219, 53,", "= [] self.playing_buttons = [] self.gameover_buttons = [] self.active_buttons =", "self.snake.direction = [-1, 0] if event.key == pygame.K_RIGHT and self.snake.direction", "\"intro\": self.intro_update() if self.state == \"play\": self.playing_update() if self.state ==", "== pygame.K_LEFT and self.snake.direction != [ 1, 0 ]: self.snake.direction", "// 2) - 50, 20, 100, 33, QUIT_BUTTON_COLOUR, hover_colour=(219, 53,", "in self.active_buttons: if button.hovered: button.click() def intro_update(self): for button in", "1]: self.snake.direction = [0, -1] if event.key == pygame.K_DOWN and", "AND QUIT BUTTON intro_play_button = Button(self, 50, 300, WIDTH -", "intro_update(self): for button in self.active_buttons: button.update() def intro_draw(self): for button", "= App_window(self) self.snake = Snake(self) self.food = Food(self) self.make_buttons() def", "RED class App: def __init__(self): pygame.init() self.clock = pygame.time.Clock() self.window", "run(self): while self.running: self.events() self.update() self.draw() self.clock.tick(FPS[0]) pygame.quit() sys.exit() def", "pos): text = self.font.render(text, False, BLACK) self.window.blit(text, (pos[0], pos[1])) def", "= True self.state = \"intro\" self.intro_buttons = [] self.playing_buttons =", "FONT, BG_COL, QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR, BLACK, FPS, RED class App: def", "= False if event.key == pygame.K_LEFT and self.snake.direction != [", "self.state = \"play\" self.active_buttons = self.playing_buttons def intro_quit(self): self.running =", "- 100, 50, QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43), function=self.intro_quit, text=\"QUIT\") self.intro_buttons.append(intro_quit_button)", "for button in self.active_buttons: button.draw() self.show_text(\"Score: \" + str(self.snake.length -", "53, 43), function=self.intro_quit, text=\"QUIT\") self.gameover_buttons.append(gameover_quit_button) def show_text(self, text, pos): text", "down if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: self.running", "5 def run(self): while self.running: self.events() self.update() self.draw() self.clock.tick(FPS[0]) pygame.quit()", "if button.hovered: button.click() def playing_update(self): for button in self.active_buttons: button.update()", "QUIT_BUTTON_COLOUR, hover_colour=(219, 53, 43), function=self.playing_quit, text=\"QUIT\") self.playing_buttons.append(playing_quit_button) # GAMEOVER BUTTON", "[] self.gameover_buttons = [] self.active_buttons = self.intro_buttons self.app_window = App_window(self)", "self.state = \"intro\" self.intro_buttons = [] self.playing_buttons = [] self.gameover_buttons", "import Snake from food import Food from settings import WIDTH,", "def __init__(self): pygame.init() self.clock = pygame.time.Clock() self.window = pygame.display.set_mode((WIDTH, HEIGHT))", "= \"play\" self.active_buttons = self.playing_buttons def intro_quit(self): self.running = False", "WIDTH, HEIGHT, FONT, BG_COL, QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR, BLACK, FPS, RED class", "BUTTON gameover_play_again_button = Button(self, 50, 300, WIDTH - 100, 50,", "self.app_window.update() def playing_draw(self): 
self.app_window.draw() for button in self.active_buttons: button.draw() self.show_text(\"Score:", "[0, -1] if event.key == pygame.K_DOWN and self.snake.direction != [", "self.events() self.update() self.draw() self.clock.tick(FPS[0]) pygame.quit() sys.exit() def events(self): if self.state", "[ 1, 0 ]: self.snake.direction = [-1, 0] if event.key", "[ -1, 0 ]: self.snake.direction = [1, 0] if event.key", "make_buttons(self): # INTRO PLAY AND QUIT BUTTON intro_play_button = Button(self,", "Button from snake import Snake from food import Food from", "FUNCTIONS def gameover_events(self): for event in pygame.event.get(): if event.type ==", "pygame.quit() sys.exit() def events(self): if self.state == \"intro\": self.intro_events() if", "self.playing_update() if self.state == \"dead\": self.gameover_update() def draw(self): self.window.fill(BG_COL) if", "= Button(self, 50, HEIGHT - 100, WIDTH - 100, 50,", "event.key == pygame.K_RIGHT and self.snake.direction != [ -1, 0 ]:", "AGAIN\") self.gameover_buttons.append(gameover_play_again_button) gameover_quit_button = Button(self, 50, HEIGHT - 100, WIDTH", "pygame.display.update() # INTRO FUNCTIONS def intro_events(self): for event in pygame.event.get():", "self.update() self.draw() self.clock.tick(FPS[0]) pygame.quit() sys.exit() def events(self): if self.state ==", "for button in self.active_buttons: button.update() def intro_draw(self): for button in", "!= [ 1, 0 ]: self.snake.direction = [-1, 0] if", "= Snake(self) self.food = Food(self) self.make_buttons() def make_buttons(self): # INTRO", "False # checks if a key is pressed down if", "self.state == \"dead\": self.gameover_update() def draw(self): self.window.fill(BG_COL) if self.state ==", "[ 0, -1 ]: self.snake.direction = [0, 1] if event.type", "300, WIDTH - 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(49, 218, 46), function=self.intro_to_play,", "gameover_draw(self): for button in self.active_buttons: button.draw() self.game_over(\"GAME OVER\", [WIDTH -", "QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR, BLACK, FPS, RED class App: def __init__(self): pygame.init()", "\"intro\" self.intro_buttons = [] self.playing_buttons = [] self.gameover_buttons = []", "False, BLACK) self.window.blit(text, (pos[0], pos[1])) def reset(self): # reset the", "from settings import WIDTH, HEIGHT, FONT, BG_COL, QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR, BLACK,", "BUTTON playing_quit_button = Button(self, (WIDTH // 2) - 50, 20,", "53, 43), function=self.intro_quit, text=\"QUIT\") self.intro_buttons.append(intro_quit_button) # PLAYING QUIT BUTTON playing_quit_button", "function=self.playing_quit, text=\"QUIT\") self.playing_buttons.append(playing_quit_button) # GAMEOVER BUTTON gameover_play_again_button = Button(self, 50,", "Button(self, 50, 300, WIDTH - 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(49, 218,", "if event.type == pygame.MOUSEBUTTONDOWN: for button in self.active_buttons: if button.hovered:", "self.snake.direction != [0, 1]: self.snake.direction = [0, -1] if event.key", "= Button(self, 50, 300, WIDTH - 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(36,", "def reset(self): # reset the game self.state = \"play\" self.active_buttons", "intro_draw(self): for button in self.active_buttons: button.draw() def intro_to_play(self): self.state =", "in self.active_buttons: button.draw() def intro_to_play(self): self.state = \"play\" self.active_buttons =", "checks if a key is pressed down if event.type ==", "event.type == pygame.MOUSEBUTTONDOWN: for button in self.active_buttons: if button.hovered: button.click()", "= [1, 0] if 
event.key == pygame.K_UP and self.snake.direction !=", "== pygame.K_UP and self.snake.direction != [0, 1]: self.snake.direction = [0,", "= self.intro_buttons self.app_window = App_window(self) self.snake = Snake(self) self.food =", "events(self): if self.state == \"intro\": self.intro_events() if self.state == \"play\":", "self.playing_buttons def intro_quit(self): self.running = False # PlAY FUNCTIONS def", "pygame.KEYDOWN and event.key == pygame.K_ESCAPE: self.running = False if event.type", "def gameover_events(self): for event in pygame.event.get(): if event.type == pygame.QUIT:", "sys.exit() def events(self): if self.state == \"intro\": self.intro_events() if self.state", "self.running = False # checks if a key is pressed", "[WIDTH - 440, 30]) def game_over(self, text, pos): text =", "- 1), [20, 20]) def playing_quit(self): self.running = False #", "button.update() self.app_window.update() def playing_draw(self): self.app_window.draw() for button in self.active_buttons: button.draw()", "if a key is pressed down if event.type == pygame.KEYDOWN:", "Snake(self) FPS[0] = 5 def run(self): while self.running: self.events() self.update()", "FUNCTIONS def intro_events(self): for event in pygame.event.get(): if event.type ==", "PlAY FUNCTIONS def playing_events(self): for event in pygame.event.get(): if event.type", "def playing_events(self): for event in pygame.event.get(): if event.type == pygame.QUIT:", "pygame.QUIT: self.running = False # checks if a key is", "self.gameover_buttons = [] self.active_buttons = self.intro_buttons self.app_window = App_window(self) self.snake", "# GAMEOVER FUNCTIONS def gameover_events(self): for event in pygame.event.get(): if", "43), function=self.intro_quit, text=\"QUIT\") self.intro_buttons.append(intro_quit_button) # PLAYING QUIT BUTTON playing_quit_button =", "self.clock = pygame.time.Clock() self.window = pygame.display.set_mode((WIDTH, HEIGHT)) self.gameover = pygame.font.SysFont(\"Comicsansms\",", "HEIGHT)) self.gameover = pygame.font.SysFont(\"Comicsansms\", 90, bold=False, italic=True) self.font = pygame.font.SysFont(FONT,", "50, HEIGHT - 100, WIDTH - 100, 50, QUIT_BUTTON_COLOUR, hover_colour=(216,", "and self.snake.direction != [ 1, 0 ]: self.snake.direction = [-1,", "- 100, 50, QUIT_BUTTON_COLOUR, hover_colour=(216, 53, 43), function=self.intro_quit, text=\"QUIT\") self.gameover_buttons.append(gameover_quit_button)", "App_window from button import Button from snake import Snake from", "if self.state == \"intro\": self.intro_events() if self.state == \"play\": self.playing_events()", "button.draw() self.show_text(\"Score: \" + str(self.snake.length - 1), [20, 20]) def", "text=\"QUIT\") self.gameover_buttons.append(gameover_quit_button) def show_text(self, text, pos): text = self.font.render(text, False,", "if self.state == \"dead\": self.gameover_events() def update(self): if self.state ==", "= self.playing_buttons def intro_quit(self): self.running = False # PlAY FUNCTIONS", "Snake(self) self.food = Food(self) self.make_buttons() def make_buttons(self): # INTRO PLAY" ]
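The recovered fragments never show this module's entry point, so the launcher below is a minimal sketch of how the App class is typically driven; the __main__ guard is an assumption, not part of the original file.

# Hypothetical entry point (not in the recovered fragments).
if __name__ == "__main__":
    app = App()
    app.run()  # runs events/update/draw once per frame, ticking at FPS[0] frames per second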
<filename>pupa/importers/bills.py<gh_stars>1-10
from pupa.utils import fix_bill_id
from opencivicdata.legislative.models import (Bill, RelatedBill, BillAbstract,
                                              BillTitle, BillIdentifier, BillAction,
                                              BillActionRelatedEntity, BillSponsorship,
                                              BillSource, BillDocument, BillVersion,
                                              BillDocumentLink, BillVersionLink)
from .base import BaseImporter
from ..exceptions import PupaInternalError


class BillImporter(BaseImporter):
    _type = 'bill'
    model_class = Bill
    related_models = {'abstracts': (BillAbstract, 'bill_id', {}),
                      'other_titles': (BillTitle, 'bill_id', {}),
                      'other_identifiers': (BillIdentifier, 'bill_id', {}),
                      'actions': (BillAction, 'bill_id', {
                          'related_entities': (BillActionRelatedEntity, 'action_id', {})}),
                      'related_bills': (RelatedBill, 'bill_id', {}),
                      'sponsorships': (BillSponsorship, 'bill_id', {}),
                      'sources': (BillSource, 'bill_id', {}),
                      'documents': (BillDocument, 'bill_id', {
                          'links': (BillDocumentLink, 'document_id', {})}),
                      'versions': (BillVersion, 'bill_id', {
                          'links': (BillVersionLink, 'version_id', {})}),
                      }
    preserve_order = {'actions'}

    def __init__(self, jurisdiction_id, org_importer, person_importer):
        super(BillImporter, self).__init__(jurisdiction_id)
        self.org_importer = org_importer
        self.person_importer = person_importer

    def get_object(self, bill):
        spec = {
            'legislative_session_id': bill['legislative_session_id'],
            'identifier': bill['identifier'],
        }
        if 'from_organization_id' in bill:
            spec['from_organization_id'] = bill['from_organization_id']
        return self.model_class.objects.prefetch_related('actions__related_entities',
                                                         'versions__links',
                                                         'documents__links',
                                                         ).get(**spec)

    def limit_spec(self, spec):
        spec['legislative_session__jurisdiction_id'] = self.jurisdiction_id
        return spec

    def prepare_for_db(self, data):
        data['identifier'] = fix_bill_id(data['identifier'])
        data['legislative_session_id'] = self.get_session_id(data.pop('legislative_session'))

        if data['from_organization']:
            data['from_organization_id'] = self.org_importer.resolve_json_id(
                data.pop('from_organization'))

        for action in data['actions']:
            action['organization_id'] = self.org_importer.resolve_json_id(
                action['organization_id'])
            for entity in action['related_entities']:
                if 'organization_id' in entity:
                    entity['organization_id'] = self.org_importer.resolve_json_id(
                        entity['organization_id'])
                elif 'person_id' in entity:
                    entity['person_id'] = self.person_importer.resolve_json_id(
                        entity['person_id'])

        for sponsor in data['sponsorships']:
            if 'person_id' in sponsor:
                sponsor['person_id'] = self.person_importer.resolve_json_id(
                    sponsor['person_id'], allow_no_match=True)
            if 'organization_id' in sponsor:
                sponsor['organization_id'] = self.org_importer.resolve_json_id(
                    sponsor['organization_id'], allow_no_match=True)

        return data

    def postimport(self):
        # go through all RelatedBill objs that are attached to a bill in this jurisdiction and
        # are currently unresolved
        for rb in RelatedBill.objects.filter(
                bill__legislative_session__jurisdiction_id=self.jurisdiction_id,
                related_bill=None):
            candidates = list(Bill.objects.filter(
                legislative_session__identifier=rb.legislative_session,
                legislative_session__jurisdiction_id=self.jurisdiction_id,
                identifier=rb.identifier)
            )
            if len(candidates) == 1:
                rb.related_bill = candidates[0]
                rb.save()
            elif len(candidates) > 1:    # pragma: no cover
                # if we ever see this, we need to add additional fields on the relation
                raise PupaInternalError('multiple related_bill candidates found for {}'.format(rb))
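For context, a sketch of how this importer is wired up. It assumes pupa's OrganizationImporter and PersonImporter plus the BaseImporter import_directory() entry point; the jurisdiction id and data directory below are hypothetical.

# Minimal usage sketch (assumed API surface; ids and paths are hypothetical).
from pupa.importers import OrganizationImporter, PersonImporter
from pupa.importers.bills import BillImporter

juris_id = 'ocd-jurisdiction/country:us/state:nc/government'  # hypothetical
org_importer = OrganizationImporter(juris_id)
person_importer = PersonImporter(juris_id)

# BillImporter resolves scraped JSON ids through the org/person importers;
# postimport() then links RelatedBill rows once all bills for the session exist.
bill_importer = BillImporter(juris_id, org_importer, person_importer)
report = bill_importer.import_directory('_data/nc')  # hypothetical scrape output dir
print(report)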
[ "def predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2): models=[] n_h_features=nlp_f.shape[1] print('loading the models...') for i in", "np from utilities.tools import load_model import pandas as pd def", "models.append(load_model(i+1,nb_words,n_h_features)) preds=[] print('predicting the test data...\\n') i=0 for m in", "def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels): models=[] n_h_features=nlp_f.shape[1] print('loading the models...') for i in", "of ',len(test_data_1)) print(\"test data accuracy: \", accuracy_score(final_labels,test_labels)) print(\"test data f_measure:", "= pd.DataFrame({\"Quality\": final_labels}) submission.to_csv(\"predictions/MSRP.tsv\", index=True,index_label='test_id') def predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2): models=[] n_h_features=nlp_f.shape[1] print('loading", "as pd def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels): models=[] n_h_features=nlp_f.shape[1] print('loading the models...') for", "print('making the sumbission file') submission = pd.DataFrame({\"is_duplicate\": final_labels}) submission.to_csv(\"predictions/Quora.tsv\", index=True,index_label='test_id')", "print(i ,' out of ',len(test_data_1)) print('making the sumbission file') submission", "final_labels}) submission.to_csv(\"predictions/MSRP.tsv\", index=True,index_label='test_id') def predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2): models=[] n_h_features=nlp_f.shape[1] print('loading the models...')", "f1_score,accuracy_score import numpy as np from utilities.tools import load_model import", "i%10000==0: print(i ,' out of ',len(test_data_1)) print('making the sumbission file')", "test_labels)) submission = pd.DataFrame({\"Quality\": final_labels}) submission.to_csv(\"predictions/MSRP.tsv\", index=True,index_label='test_id') def predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2): models=[]", "i=0 for m in models: i+=1 preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=64, verbose=0)", "the test data...\\n') i=0 for m in models: i+=1 preds_prob=m.predict([test_data_1,", "preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=64, verbose=0) preds.append(preds_prob[:,1]) preds=np.asarray(preds) final_labels=np.zeros(len(test_data_1),dtype=int) #average the predicttion", "data f_measure: \", f1_score(final_labels, test_labels)) submission = pd.DataFrame({\"Quality\": final_labels}) submission.to_csv(\"predictions/MSRP.tsv\",", "models=[] n_h_features=nlp_f.shape[1] print('loading the models...') for i in range(n_models): models.append(load_model(i+1,nb_words,n_h_features))", "',len(test_data_1)) print('making the sumbission file') submission = pd.DataFrame({\"is_duplicate\": final_labels}) submission.to_csv(\"predictions/Quora.tsv\",", "models...') for i in range(n_models): models.append(load_model(i+1,nb_words,n_h_features)) preds=[] print('predicting the test", "for i in range(len(test_data_1)): final_labels[i]=round(np.mean(preds[:,i])) if i%100==0: print(i ,' out", "import pandas as pd def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels): models=[] n_h_features=nlp_f.shape[1] print('loading the", "test data...\\n') i=0 for m in models: i+=1 preds_prob=m.predict([test_data_1, test_data_2,nlp_f],", "for m in models: i+=1 preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=125, verbose=0) preds.append(preds_prob[:,1])", "out of 
',len(test_data_1)) print(\"test data accuracy: \", accuracy_score(final_labels,test_labels)) print(\"test data", "f1_score(final_labels, test_labels)) submission = pd.DataFrame({\"Quality\": final_labels}) submission.to_csv(\"predictions/MSRP.tsv\", index=True,index_label='test_id') def predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2):", "predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2): models=[] n_h_features=nlp_f.shape[1] print('loading the models...') for i in range(n_models):", "load_model import pandas as pd def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels): models=[] n_h_features=nlp_f.shape[1] print('loading", "print(\"test data accuracy: \", accuracy_score(final_labels,test_labels)) print(\"test data f_measure: \", f1_score(final_labels,", "test_data_2,nlp_f], batch_size=125, verbose=0) preds.append(preds_prob[:,1]) preds=np.asarray(preds) final_labels=np.zeros(len(test_data_1),dtype=float) #average the predicttion for", "\", accuracy_score(final_labels,test_labels)) print(\"test data f_measure: \", f1_score(final_labels, test_labels)) submission =", "import f1_score,accuracy_score import numpy as np from utilities.tools import load_model", "import numpy as np from utilities.tools import load_model import pandas", "print('loading the models...') for i in range(n_models): models.append(load_model(i+1,nb_words,n_h_features)) preds=[] print('predicting", "preds=np.asarray(preds) final_labels=np.zeros(len(test_data_1),dtype=int) #average the predicttion for i in range(len(test_data_1)): final_labels[i]=round(np.mean(preds[:,i]))", "the models...') for i in range(n_models): models.append(load_model(i+1,nb_words,n_h_features)) preds=[] print('predicting the", "in range(len(test_data_1)): final_labels[i]=round(np.mean(preds[:,i])) if i%100==0: print(i ,' out of ',len(test_data_1))", "print(\"test data f_measure: \", f1_score(final_labels, test_labels)) submission = pd.DataFrame({\"Quality\": final_labels})", "i in range(len(test_data_1)): final_labels[i]=np.mean(preds[:,i]) if i%10000==0: print(i ,' out of", "utilities.tools import load_model import pandas as pd def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels): models=[]", "from utilities.tools import load_model import pandas as pd def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels):", "preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=125, verbose=0) preds.append(preds_prob[:,1]) preds=np.asarray(preds) final_labels=np.zeros(len(test_data_1),dtype=float) #average the predicttion", "if i%100==0: print(i ,' out of ',len(test_data_1)) print(\"test data accuracy:", "submission = pd.DataFrame({\"Quality\": final_labels}) submission.to_csv(\"predictions/MSRP.tsv\", index=True,index_label='test_id') def predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2): models=[] n_h_features=nlp_f.shape[1]", "batch_size=125, verbose=0) preds.append(preds_prob[:,1]) preds=np.asarray(preds) final_labels=np.zeros(len(test_data_1),dtype=float) #average the predicttion for i", "final_labels[i]=round(np.mean(preds[:,i])) if i%100==0: print(i ,' out of ',len(test_data_1)) print(\"test data", "predicttion for i in range(len(test_data_1)): final_labels[i]=np.mean(preds[:,i]) if i%10000==0: print(i ,'", "from sklearn.metrics import f1_score,accuracy_score import numpy as np from utilities.tools", "out of ',len(test_data_1)) print('making the sumbission file') submission = 
# Ensemble prediction helpers for the MSRP and Quora paraphrase test sets.
from sklearn.metrics import f1_score, accuracy_score
import numpy as np
from utilities.tools import load_model
import pandas as pd


def predict_MSRP_test_data(n_models, nb_words, nlp_f, test_data_1, test_data_2, test_labels):
    models = []
    n_h_features = nlp_f.shape[1]
    print('loading the models...')
    for i in range(n_models):
        models.append(load_model(i + 1, nb_words, n_h_features))

    preds = []
    print('predicting the test data...\n')
    i = 0
    for m in models:
        i += 1
        preds_prob = m.predict([test_data_1, test_data_2, nlp_f], batch_size=64, verbose=0)
        preds.append(preds_prob[:, 1])

    preds = np.asarray(preds)
    final_labels = np.zeros(len(test_data_1), dtype=int)
    # average the predictions of all models for each pair
    for i in range(len(test_data_1)):
        final_labels[i] = round(np.mean(preds[:, i]))
        if i % 100 == 0:
            print(i, ' out of ', len(test_data_1))

    print("test data accuracy: ", accuracy_score(final_labels, test_labels))
    print("test data f_measure: ", f1_score(final_labels, test_labels))
    submission = pd.DataFrame({"Quality": final_labels})
    submission.to_csv("predictions/MSRP.tsv", index=True, index_label='test_id')


def predict_Quora_test_data(n_models, nb_words, nlp_f, test_data_1, test_data_2):
    models = []
    n_h_features = nlp_f.shape[1]
    print('loading the models...')
    for i in range(n_models):
        models.append(load_model(i + 1, nb_words, n_h_features))

    preds = []
    print('predicting the test data...\n')
    i = 0
    for m in models:
        i += 1
        preds_prob = m.predict([test_data_1, test_data_2, nlp_f], batch_size=125, verbose=0)
        preds.append(preds_prob[:, 1])

    preds = np.asarray(preds)
    final_labels = np.zeros(len(test_data_1), dtype=float)
    # average the predictions of all models for each pair
    for i in range(len(test_data_1)):
        final_labels[i] = np.mean(preds[:, i])
        if i % 10000 == 0:
            print(i, ' out of ', len(test_data_1))

    print('making the submission file')
    submission = pd.DataFrame({"is_duplicate": final_labels})
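# --- Illustration (added; not part of the original source) ---
# Both helpers above implement the same ensembling step: stack each model's
# positive-class probabilities and average them per example before turning
# them into labels. A self-contained sketch of that step with made-up numbers
# (the 3x5 matrix and the 0.5 threshold standing in for the round() used in
# the loops above are illustrative assumptions):
import numpy as np

preds = np.array([
    [0.9, 0.2, 0.6, 0.4, 0.7],   # model 1: P(paraphrase) for 5 test pairs
    [0.8, 0.1, 0.5, 0.6, 0.9],   # model 2
    [0.7, 0.3, 0.4, 0.5, 0.8],   # model 3
])
mean_prob = preds.mean(axis=0)                 # average over models -> one score per pair
hard_labels = (mean_prob >= 0.5).astype(int)   # threshold into 0/1 labels
print(mean_prob)     # [0.8 0.2 0.5 0.5 0.8]
print(hard_labels)   # [1 0 1 1 1]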
[ "all # lie within [-0.1, 1] ax1.set_xlim([-0.1, 1]) # The", "join(figures_dir, \"Clustered {}.png\".format(title)) fig.tight_layout() try: fig.savefig(figure_fp, transparent=transparent) except ValueError: logging.warning(traceback.format_exc())", "y_b, names_a, names_b, n_a=None, n_b=None, figsize=None, output_dir=None, alabel=\"socio-demographic\", blabel=\"purchases\", transparent=False,", "i, and sort them ith_cluster_silhouette_values = np.sort(silhouette_values[cluster_labels == k]) size_cluster_i", "== len(y_b) assert len(names_a) == n_a assert len(names_b) == n_b", "coefficient can range from -1, 1 but here all #", "plot # The silhouette coefficient can range from -1, 1", "y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=default_alpha, ) # Label the", "+ 1, X[y == i].shape) ) continue xtext, ytext =", "os.path import exists, join from textwrap import fill import matplotlib.patheffects", "silhouette plot # The silhouette coefficient can range from -1,", "coding=utf-8 import logging import traceback from os import makedirs from", "as np import seaborn as sns from koino.plot import big_square,", "cluster_names is None: cluster_names = [\"Cluster {}\".format(i + 1) for", "2nd Plot showing the actual clusters formed colors = cm.spectral(cluster_labels.astype(float)", "fill import matplotlib.patheffects as PathEffects import matplotlib.pyplot as plt import", "lw=0, s=20, c=palette[i], # label=cluster_names[i]) ax.set_title(title) ax.scatter(X[:, 0], X[:, 1],", "assert len(names_a) == n_a assert len(names_b) == n_b a_sets =", "np.array(sns.color_palette(\"hls\", n_clusters)) fig, ax = plt.subplots(figsize=big_square) # for i in", "range(n_clusters)]) # Choose a color palette with seaborn. palette =", "ax1.fill_betweenx( np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=default_alpha, ) #", "coefficient values\") ax1.set_ylabel(\"Cluster label\") # The vertical line for average", ":2]) if not len(samples): logging.warning( \"Probably singular cluster {} (shape:{})\".format(i", "ax1.set_ylabel(\"Cluster label\") # The vertical line for average silhouette score", "(shape:{})\".format(i + 1, X[y == i].shape) ) continue xtext, ytext", "= ax.text(xtext, ytext, name, fontsize=20, wrap=True, ha=\"left\") txt.set_path_effects( [PathEffects.Stroke(linewidth=5, foreground=\"w\"),", "ax = plt.subplots(figsize=figsize) plt.title(\"Overlap between {} and {} clusters\".format(alabel, blabel))", "to fix cluster centroid coordinates.\"\"\" if cluster_names is None: cluster_names", "0.5 * size_cluster_i, str(k)) # Compute the new y_lower for", "fig.savefig(figure_fp, transparent=transparent) except ValueError: logging.warning(traceback.format_exc()) finally: plt.close() plt.clf() def overlap_jaccard(", "lw=0, alpha=default_alpha, c=colors) ax2.set_title(\"The visualization of the clustered data.\") ax2.set_xlabel(\"Feature", "# coding=utf-8 import logging import traceback from os import makedirs", "coordinates.\"\"\" if cluster_names is None: cluster_names = [\"Cluster {}\".format(i +", "them clearly. 
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])", "space for the 2nd feature\") plt.suptitle( (\"Silhouette analysis for KMeans", "sns.heatmap( inter_sets, annot=True, fmt=\"6.0f\", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout()", "y = np.hstack([y[y == i] for i in range(n_clusters)]) #", ") plt.savefig(figure_fp) plt.close() plt.clf() def plot_cluster_assignments( X, y, n_clusters, figures_dir,", "assert np.isfinite(xtext) assert np.isfinite(ytext) txt = ax.text(xtext, ytext, name, fontsize=20,", "(n_a or n_b) or not output_dir: return elif output_dir and", "len(indx) == len(y_a) == len(y_b) assert len(names_a) == n_a assert", "from os import makedirs from os.path import exists, join from", "X = np.vstack([X[y == i] for i in range(n_clusters)]) y", "k in range(n_clusters): # Aggregate the silhouette scores for samples", "centroid coordinates.\"\"\" if cluster_names is None: cluster_names = [\"Cluster {}\".format(i", "k]) size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i color", "size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i color =", "0.2, 0.4, 0.6, 0.8, 1]) # Construct cluster # 2nd", "inserting blank space between silhouette # plots of individual clusters,", "= ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i color = cm.spectral(float(k)", "or median to fix cluster centroid coordinates.\"\"\" if cluster_names is", "n_b a_sets = [set(indx[y_a == i]) for i in range(n_a)]", "fontsize=14, fontweight=\"bold\", ) plt.savefig(figure_fp) plt.close() plt.clf() def plot_cluster_assignments( X, y,", "not output_dir: return elif output_dir and not exists(output_dir): makedirs(output_dir) else:", "a color palette with seaborn. palette = np.array(sns.color_palette(\"hls\", n_clusters)) fig,", "ytext, name, fontsize=20, wrap=True, ha=\"left\") txt.set_path_effects( [PathEffects.Stroke(linewidth=5, foreground=\"w\"), PathEffects.Normal()] )", "b_sets], dtype=np.int_ ) fig, ax = plt.subplots(figsize=figsize) plt.title(\"Overlap between {}", "as sns from koino.plot import big_square, default_alpha from matplotlib import", "def plot_cluster_assignments( X, y, n_clusters, figures_dir, transparent=False, cluster_names=None, title=\"\" ):", "square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() jaccard_path = join(output_dir, \"Clusters Jaccard.png\")", "each cluster. 
for i in range(n_clusters): # Position of each", "for the 2nd feature\") plt.suptitle( (\"Silhouette analysis for KMeans \"", "# The (n_clusters+1)*10 is for inserting blank space between silhouette", "color=\"red\", linestyle=\"--\") ax1.set_yticks([]) # Clear the yaxis labels / ticks", "for the various clusters.\") ax1.set_xlabel(\"The silhouette coefficient values\") ax1.set_ylabel(\"Cluster label\")", "# Clear the yaxis labels / ticks ax1.set_xticks([-0.1, 0, 0.2,", "plt.subplots(figsize=figsize) plt.title(\"Overlap between {} and {} clusters\".format(alabel, blabel)) sns.heatmap( inter_sets,", "the silhouette scores for samples belonging to # cluster i,", "the silhouette plots with their cluster numbers at the #", "logging import traceback from os import makedirs from os.path import", "axis=0) name = fill(cluster_names[i], width=20) assert np.isfinite(xtext) assert np.isfinite(ytext) txt", "+ 1) for i in range(n_clusters)] # We first reorder", "and 2 columns fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(26,", "is the silhouette plot # The silhouette coefficient can range", "ytext = np.median(samples, axis=0) name = fill(cluster_names[i], width=20) assert np.isfinite(xtext)", "\"Clustered {}.png\".format(title)) fig.tight_layout() try: fig.savefig(figure_fp, transparent=transparent) except ValueError: logging.warning(traceback.format_exc()) finally:", "ax1.set_xlim([-0.1, 1]) # The (n_clusters+1)*10 is for inserting blank space", "yticklabels=names_b, ) plt.tight_layout() inter_path = join(output_dir, \"Clusters Intersection.png\") plt.savefig(inter_path, transparent=transparent)", "txt.set_path_effects( [PathEffects.Stroke(linewidth=5, foreground=\"w\"), PathEffects.Normal()] ) # plt.legend() figure_fp = join(figures_dir,", "range(n_a)] b_sets = [set(indx[y_b == i]) for i in range(n_b)]", "ith_cluster_silhouette_values = np.sort(silhouette_values[cluster_labels == k]) size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper =", "contingency tables based on set intersection and jaccard score. #", "jac_arr, annot=True, fmt=\".3f\", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() jaccard_path", "0], X[:, 1], lw=0, s=20, c=palette[y.astype(np.int)]) ax.axis(\"off\") # Add the", "range(n_clusters)] # We first reorder the data points according to", "Choose a color palette with seaborn. palette = np.array(sns.color_palette(\"hls\", n_clusters))", "the 0 samples ax1.set_title(\"The silhouette plot for the various clusters.\")", "silhouette scores for samples belonging to # cluster i, and", "len(y_a) == len(y_b) assert len(names_a) == n_a assert len(names_b) ==", "square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() inter_path = join(output_dir, \"Clusters Intersection.png\")", "data points according to the centroids labels X = np.vstack([X[y", "X, figure_fp, n_clusters, silhouette_values, cluster_labels, silhouette_avg ): # Create a", "ax.scatter(X[:, 0], X[:, 1], lw=0, s=20, c=palette[y.astype(np.int)]) ax.axis(\"off\") # Add", "{} and {} clusters\".format(alabel, blabel)) sns.heatmap( inter_sets, annot=True, fmt=\"6.0f\", ax=ax,", "makedirs from os.path import exists, join from textwrap import fill", "ValueError: logging.warning(traceback.format_exc()) finally: plt.close() plt.clf() def overlap_jaccard( indx, y_a, y_b,", "space between silhouette # plots of individual clusters, to demarcate", "i in range(n_clusters): # Position of each label. 
samples =", "feature\") plt.suptitle( (\"Silhouette analysis for KMeans \" \"with n_clusters =", "in range(n_b)] inter_sets = np.asarray( [[len(set_a & set_t) for set_a", "sns.heatmap( jac_arr, annot=True, fmt=\".3f\", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout()", "0], X[mask, 1], lw=0, s=20, c=palette[i], # label=cluster_names[i]) ax.set_title(title) ax.scatter(X[:,", "finally: plt.close() plt.clf() def overlap_jaccard( indx, y_a, y_b, names_a, names_b,", "set_b) for set_a in a_sets] for set_b in b_sets], dtype=np.float_,", "sort them ith_cluster_silhouette_values = np.sort(silhouette_values[cluster_labels == k]) size_cluster_i = ith_cluster_silhouette_values.shape[0]", "= plt.subplots(figsize=figsize) plt.title(\"Jaccard scores between {} and {} clusters\".format(alabel, blabel))", "not len(samples): logging.warning( \"Probably singular cluster {} (shape:{})\".format(i + 1,", "ax.set_title(title) ax.scatter(X[:, 0], X[:, 1], lw=0, s=20, c=palette[y.astype(np.int)]) ax.axis(\"off\") #", "foreground=\"w\"), PathEffects.Normal()] ) # plt.legend() figure_fp = join(figures_dir, \"Clustered {}.png\".format(title))", "PathEffects import matplotlib.pyplot as plt import numpy as np import", "fontweight=\"bold\", ) plt.savefig(figure_fp) plt.close() plt.clf() def plot_cluster_assignments( X, y, n_clusters,", "a_sets] for set_t in b_sets], dtype=np.int_ ) fig, ax =", "import exists, join from textwrap import fill import matplotlib.patheffects as", "n_b=None, figsize=None, output_dir=None, alabel=\"socio-demographic\", blabel=\"purchases\", transparent=False, ): \"\"\"Compute and plot", "range(n_clusters)]) y = np.hstack([y[y == i] for i in range(n_clusters)])", "blabel)) sns.heatmap( inter_sets, annot=True, fmt=\"6.0f\", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, )", "len(names_a) == n_a assert len(names_b) == n_b a_sets = [set(indx[y_a", "i in range(n_clusters): # mask = y == i #", "and n_b assert len(indx) == len(y_a) == len(y_b) assert len(names_a)", "(ax1, ax2) = plt.subplots(1, 2, figsize=(26, 10)) # The 1st", "as plt import numpy as np import seaborn as sns", "range(n_clusters): # Aggregate the silhouette scores for samples belonging to", "# label=cluster_names[i]) ax.set_title(title) ax.scatter(X[:, 0], X[:, 1], lw=0, s=20, c=palette[y.astype(np.int)])", "= fill(cluster_names[i], width=20) assert np.isfinite(xtext) assert np.isfinite(ytext) txt = ax.text(xtext,", "i]) for i in range(n_a)] b_sets = [set(indx[y_b == i])", "y_lower = y_upper + 10 # 10 for the 0", "1st subplot is the silhouette plot # The silhouette coefficient", "label\") # The vertical line for average silhouette score of", "X[mask, 1], lw=0, s=20, c=palette[i], # label=cluster_names[i]) ax.set_title(title) ax.scatter(X[:, 0],", "# Position of each label. 
samples = np.atleast_2d(X[y == i,", "import matplotlib.patheffects as PathEffects import matplotlib.pyplot as plt import numpy", "the 1st feature\") ax2.set_ylabel(\"Feature space for the 2nd feature\") plt.suptitle(", "plt.tight_layout() inter_path = join(output_dir, \"Clusters Intersection.png\") plt.savefig(inter_path, transparent=transparent) plt.close() plt.clf()", "2 columns fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(26, 10))", "{} clusters\".format(alabel, blabel)) sns.heatmap( inter_sets, annot=True, fmt=\"6.0f\", ax=ax, square=True, xticklabels=names_a,", "values\") ax1.set_ylabel(\"Cluster label\") # The vertical line for average silhouette", "s=20, lw=0, alpha=default_alpha, c=colors) ax2.set_title(\"The visualization of the clustered data.\")", "& set_t) for set_a in a_sets] for set_t in b_sets],", "palette = np.array(sns.color_palette(\"hls\", n_clusters)) fig, ax = plt.subplots(figsize=big_square) # for", "a_sets] for set_b in b_sets], dtype=np.float_, ) fig, ax =", "10 for the 0 samples ax1.set_title(\"The silhouette plot for the", "? \"\"\" if not (n_a or n_b) or not output_dir:", "formed colors = cm.spectral(cluster_labels.astype(float) / n_clusters) # colors = y", "cluster centroid coordinates.\"\"\" if cluster_names is None: cluster_names = [\"Cluster", "cluster {} (shape:{})\".format(i + 1, X[y == i].shape) ) continue", "ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() jaccard_path = join(output_dir, \"Clusters", "plt.clf() def plot_cluster_assignments( X, y, n_clusters, figures_dir, transparent=False, cluster_names=None, title=\"\"", "here all # lie within [-0.1, 1] ax1.set_xlim([-0.1, 1]) #", "on set intersection and jaccard score. # TODO: Normaliser par", "traceback from os import makedirs from os.path import exists, join", "within [-0.1, 1] ax1.set_xlim([-0.1, 1]) # The (n_clusters+1)*10 is for", "subplot with 1 row and 2 columns fig, (ax1, ax2)", "PathEffects.Normal()] ) # plt.legend() figure_fp = join(figures_dir, \"Clustered {}.png\".format(title)) fig.tight_layout()", "edgecolor=color, alpha=default_alpha, ) # Label the silhouette plots with their", "# Add the labels for each cluster. for i in", "y_lower for next plot y_lower = y_upper + 10 #", "ax1.set_xlabel(\"The silhouette coefficient values\") ax1.set_ylabel(\"Cluster label\") # The vertical line", "i] for i in range(n_clusters)]) # Choose a color palette", "for i in range(n_clusters)]) y = np.hstack([y[y == i] for", "sns from koino.plot import big_square, default_alpha from matplotlib import cm", "values ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\") ax1.set_yticks([]) # Clear the yaxis labels", "annot=True, fmt=\".3f\", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() jaccard_path =", "+ size_cluster_i color = cm.spectral(float(k) / n_clusters) ax1.fill_betweenx( np.arange(y_lower, y_upper),", "i].shape) ) continue xtext, ytext = np.median(samples, axis=0) name =", "with seaborn. palette = np.array(sns.color_palette(\"hls\", n_clusters)) fig, ax = plt.subplots(figsize=big_square)", "def plot_silhouette( X, figure_fp, n_clusters, silhouette_values, cluster_labels, silhouette_avg ): #", "linestyle=\"--\") ax1.set_yticks([]) # Clear the yaxis labels / ticks ax1.set_xticks([-0.1,", "+ 1) * 10]) y_lower = 10 for k in", "reorder the data points according to the centroids labels X", "== i # ax.scatter(X[mask, 0], X[mask, 1], lw=0, s=20, c=palette[i],", "tables based on set intersection and jaccard score. 
# TODO:", "a_sets = [set(indx[y_a == i]) for i in range(n_a)] b_sets", "if cluster_names is None: cluster_names = [\"Cluster {}\".format(i + 1)", "n_a assert len(names_b) == n_b a_sets = [set(indx[y_a == i])", "ax.scatter(X[mask, 0], X[mask, 1], lw=0, s=20, c=palette[i], # label=cluster_names[i]) ax.set_title(title)", "(n_clusters+1)*10 is for inserting blank space between silhouette # plots", "range from -1, 1 but here all # lie within", "transparent=False, ): \"\"\"Compute and plot contingency tables based on set", "cm.spectral(cluster_labels.astype(float) / n_clusters) # colors = y ax2.scatter(X[:, 0], X[:,", "= np.array(sns.color_palette(\"hls\", n_clusters)) fig, ax = plt.subplots(figsize=big_square) # for i", "from ..utils.base import jaccard def plot_silhouette( X, figure_fp, n_clusters, silhouette_values,", "len(samples): logging.warning( \"Probably singular cluster {} (shape:{})\".format(i + 1, X[y", "Normaliser par len(sd_set) ou len(diet_set) ? \"\"\" if not (n_a", "len(sd_set) ou len(diet_set) ? \"\"\" if not (n_a or n_b)", "silhouette_avg ): # Create a subplot with 1 row and", "plt.subplots(figsize=figsize) plt.title(\"Jaccard scores between {} and {} clusters\".format(alabel, blabel)) sns.heatmap(", "in range(n_clusters): # Position of each label. samples = np.atleast_2d(X[y", ") plt.tight_layout() inter_path = join(output_dir, \"Clusters Intersection.png\") plt.savefig(inter_path, transparent=transparent) plt.close()", "= join(output_dir, \"Clusters Intersection.png\") plt.savefig(inter_path, transparent=transparent) plt.close() plt.clf() jac_arr =", "if not len(samples): logging.warning( \"Probably singular cluster {} (shape:{})\".format(i +", "c=colors) ax2.set_title(\"The visualization of the clustered data.\") ax2.set_xlabel(\"Feature space for", "y_upper + 10 # 10 for the 0 samples ax1.set_title(\"The", "# Choose a color palette with seaborn. palette = np.array(sns.color_palette(\"hls\",", "assert len(names_b) == n_b a_sets = [set(indx[y_a == i]) for", "ticks ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1]) # Construct", "== i]) for i in range(n_b)] inter_sets = np.asarray( [[len(set_a", "ha=\"left\") txt.set_path_effects( [PathEffects.Stroke(linewidth=5, foreground=\"w\"), PathEffects.Normal()] ) # plt.legend() figure_fp =", "demarcate them clearly. 
ax1.set_ylim([0, len(X) + (n_clusters + 1) *", "..utils.base import jaccard def plot_silhouette( X, figure_fp, n_clusters, silhouette_values, cluster_labels,", "== i, :2]) if not len(samples): logging.warning( \"Probably singular cluster", "fig, ax = plt.subplots(figsize=figsize) plt.title(\"Overlap between {} and {} clusters\".format(alabel,", "colors = y ax2.scatter(X[:, 0], X[:, 1], marker=\".\", s=20, lw=0,", "the 2nd feature\") plt.suptitle( (\"Silhouette analysis for KMeans \" \"with", "0 samples ax1.set_title(\"The silhouette plot for the various clusters.\") ax1.set_xlabel(\"The", "ax.text(xtext, ytext, name, fontsize=20, wrap=True, ha=\"left\") txt.set_path_effects( [PathEffects.Stroke(linewidth=5, foreground=\"w\"), PathEffects.Normal()]", "+ 10 # 10 for the 0 samples ax1.set_title(\"The silhouette", ") # Label the silhouette plots with their cluster numbers", "\"\"\"Clustering assignments scatter plot Notes ----- Can use mean or", "The (n_clusters+1)*10 is for inserting blank space between silhouette #", "i in range(n_clusters)]) y = np.hstack([y[y == i] for i", "according to the centroids labels X = np.vstack([X[y == i]", "= cm.spectral(float(k) / n_clusters) ax1.fill_betweenx( np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color,", "The 1st subplot is the silhouette plot # The silhouette", "i # ax.scatter(X[mask, 0], X[mask, 1], lw=0, s=20, c=palette[i], #", "Create a subplot with 1 row and 2 columns fig,", "np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=default_alpha, ) # Label", "assert np.isfinite(ytext) txt = ax.text(xtext, ytext, name, fontsize=20, wrap=True, ha=\"left\")", "range(n_b)] inter_sets = np.asarray( [[len(set_a & set_t) for set_a in", "for i in range(n_clusters)] # We first reorder the data", "scores for samples belonging to # cluster i, and sort", "par len(sd_set) ou len(diet_set) ? \"\"\" if not (n_a or", "or n_b) or not output_dir: return elif output_dir and not", "not exists(output_dir): makedirs(output_dir) else: assert n_a and n_b assert len(indx)", "0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=default_alpha, ) # Label the silhouette", "{}.png\".format(title)) fig.tight_layout() try: fig.savefig(figure_fp, transparent=transparent) except ValueError: logging.warning(traceback.format_exc()) finally: plt.close()", "plt.savefig(inter_path, transparent=transparent) plt.close() plt.clf() jac_arr = np.asarray( [[jaccard(set_a, set_b) for", "yticklabels=names_b, ) plt.tight_layout() jaccard_path = join(output_dir, \"Clusters Jaccard.png\") plt.savefig(jaccard_path, transparent=transparent)", "from os.path import exists, join from textwrap import fill import", "import traceback from os import makedirs from os.path import exists,", "to demarcate them clearly. 
ax1.set_ylim([0, len(X) + (n_clusters + 1)", "alpha=default_alpha, ) # Label the silhouette plots with their cluster", "plot for the various clusters.\") ax1.set_xlabel(\"The silhouette coefficient values\") ax1.set_ylabel(\"Cluster", "0.6, 0.8, 1]) # Construct cluster # 2nd Plot showing", "jac_arr = np.asarray( [[jaccard(set_a, set_b) for set_a in a_sets] for", "the clustered data.\") ax2.set_xlabel(\"Feature space for the 1st feature\") ax2.set_ylabel(\"Feature", "fill(cluster_names[i], width=20) assert np.isfinite(xtext) assert np.isfinite(ytext) txt = ax.text(xtext, ytext,", "continue xtext, ytext = np.median(samples, axis=0) name = fill(cluster_names[i], width=20)", "between silhouette # plots of individual clusters, to demarcate them", "i, :2]) if not len(samples): logging.warning( \"Probably singular cluster {}", "1 row and 2 columns fig, (ax1, ax2) = plt.subplots(1,", "cm from ..utils.base import jaccard def plot_silhouette( X, figure_fp, n_clusters,", "i]) for i in range(n_b)] inter_sets = np.asarray( [[len(set_a &", "plt.title(\"Overlap between {} and {} clusters\".format(alabel, blabel)) sns.heatmap( inter_sets, annot=True,", "1], marker=\".\", s=20, lw=0, alpha=default_alpha, c=colors) ax2.set_title(\"The visualization of the", "Clear the yaxis labels / ticks ax1.set_xticks([-0.1, 0, 0.2, 0.4,", "n_clusters) ax1.fill_betweenx( np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=default_alpha, )", "seaborn as sns from koino.plot import big_square, default_alpha from matplotlib", "them ith_cluster_silhouette_values = np.sort(silhouette_values[cluster_labels == k]) size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper", "columns fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(26, 10)) #", "y_upper = y_lower + size_cluster_i color = cm.spectral(float(k) / n_clusters)", "# The vertical line for average silhouette score of all", "range(n_clusters): # Position of each label. samples = np.atleast_2d(X[y ==", "from koino.plot import big_square, default_alpha from matplotlib import cm from", "and not exists(output_dir): makedirs(output_dir) else: assert n_a and n_b assert", "y_lower + 0.5 * size_cluster_i, str(k)) # Compute the new", "cluster_labels, silhouette_avg ): # Create a subplot with 1 row", "clustered data.\") ax2.set_xlabel(\"Feature space for the 1st feature\") ax2.set_ylabel(\"Feature space", "or not output_dir: return elif output_dir and not exists(output_dir): makedirs(output_dir)", "average silhouette score of all the values ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\")", "in range(n_clusters)] # We first reorder the data points according", "b_sets = [set(indx[y_b == i]) for i in range(n_b)] inter_sets", "numpy as np import seaborn as sns from koino.plot import", "np.hstack([y[y == i] for i in range(n_clusters)]) # Choose a", "wrap=True, ha=\"left\") txt.set_path_effects( [PathEffects.Stroke(linewidth=5, foreground=\"w\"), PathEffects.Normal()] ) # plt.legend() figure_fp", "== n_b a_sets = [set(indx[y_a == i]) for i in", "figures_dir, transparent=False, cluster_names=None, title=\"\" ): \"\"\"Clustering assignments scatter plot Notes", "set_a in a_sets] for set_b in b_sets], dtype=np.float_, ) fig,", "1], lw=0, s=20, c=palette[y.astype(np.int)]) ax.axis(\"off\") # Add the labels for", "clusters, to demarcate them clearly. 
ax1.set_ylim([0, len(X) + (n_clusters +", "output_dir and not exists(output_dir): makedirs(output_dir) else: assert n_a and n_b", "from -1, 1 but here all # lie within [-0.1,", "but here all # lie within [-0.1, 1] ax1.set_xlim([-0.1, 1])", "# colors = y ax2.scatter(X[:, 0], X[:, 1], marker=\".\", s=20,", "in a_sets] for set_t in b_sets], dtype=np.int_ ) fig, ax", "1, X[y == i].shape) ) continue xtext, ytext = np.median(samples,", "not (n_a or n_b) or not output_dir: return elif output_dir", "s=20, c=palette[y.astype(np.int)]) ax.axis(\"off\") # Add the labels for each cluster.", "len(X) + (n_clusters + 1) * 10]) y_lower = 10", "data.\") ax2.set_xlabel(\"Feature space for the 1st feature\") ax2.set_ylabel(\"Feature space for", "else: assert n_a and n_b assert len(indx) == len(y_a) ==", "# plots of individual clusters, to demarcate them clearly. ax1.set_ylim([0,", "= [set(indx[y_b == i]) for i in range(n_b)] inter_sets =", "label. samples = np.atleast_2d(X[y == i, :2]) if not len(samples):", "import seaborn as sns from koino.plot import big_square, default_alpha from", "{} (shape:{})\".format(i + 1, X[y == i].shape) ) continue xtext,", "the labels for each cluster. for i in range(n_clusters): #", "color = cm.spectral(float(k) / n_clusters) ax1.fill_betweenx( np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values,", "/ ticks ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1]) #", "palette with seaborn. palette = np.array(sns.color_palette(\"hls\", n_clusters)) fig, ax =", "= np.atleast_2d(X[y == i, :2]) if not len(samples): logging.warning( \"Probably", "in range(n_clusters): # Aggregate the silhouette scores for samples belonging", "b_sets], dtype=np.float_, ) fig, ax = plt.subplots(figsize=figsize) plt.title(\"Jaccard scores between", "figure_fp, n_clusters, silhouette_values, cluster_labels, silhouette_avg ): # Create a subplot", "indx, y_a, y_b, names_a, names_b, n_a=None, n_b=None, figsize=None, output_dir=None, alabel=\"socio-demographic\",", "{}\".format(i + 1) for i in range(n_clusters)] # We first", "2, figsize=(26, 10)) # The 1st subplot is the silhouette", "i] for i in range(n_clusters)]) y = np.hstack([y[y == i]", "for i in range(n_clusters): # Position of each label. samples", "alabel=\"socio-demographic\", blabel=\"purchases\", transparent=False, ): \"\"\"Compute and plot contingency tables based", "for set_t in b_sets], dtype=np.int_ ) fig, ax = plt.subplots(figsize=figsize)", "np.sort(silhouette_values[cluster_labels == k]) size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower +", ") fig, ax = plt.subplots(figsize=figsize) plt.title(\"Overlap between {} and {}", "plt.close() plt.clf() def plot_cluster_assignments( X, y, n_clusters, figures_dir, transparent=False, cluster_names=None,", "= [set(indx[y_a == i]) for i in range(n_a)] b_sets =", "+ 0.5 * size_cluster_i, str(k)) # Compute the new y_lower", "for i in range(n_clusters): # mask = y == i", "inter_sets = np.asarray( [[len(set_a & set_t) for set_a in a_sets]", "for set_a in a_sets] for set_t in b_sets], dtype=np.int_ )", "10 # 10 for the 0 samples ax1.set_title(\"The silhouette plot", ") continue xtext, ytext = np.median(samples, axis=0) name = fill(cluster_names[i],", "import fill import matplotlib.patheffects as PathEffects import matplotlib.pyplot as plt", "ax.axis(\"off\") # Add the labels for each cluster. 
for i", "if not (n_a or n_b) or not output_dir: return elif", "np.asarray( [[len(set_a & set_t) for set_a in a_sets] for set_t", "1 but here all # lie within [-0.1, 1] ax1.set_xlim([-0.1,", "[[jaccard(set_a, set_b) for set_a in a_sets] for set_b in b_sets],", "individual clusters, to demarcate them clearly. ax1.set_ylim([0, len(X) + (n_clusters", "Notes ----- Can use mean or median to fix cluster", "\"\"\" if not (n_a or n_b) or not output_dir: return", "= plt.subplots(1, 2, figsize=(26, 10)) # The 1st subplot is", "i in range(n_a)] b_sets = [set(indx[y_b == i]) for i", "10)) # The 1st subplot is the silhouette plot #", "# ax.scatter(X[mask, 0], X[mask, 1], lw=0, s=20, c=palette[i], # label=cluster_names[i])", "The vertical line for average silhouette score of all the", ") plt.tight_layout() jaccard_path = join(output_dir, \"Clusters Jaccard.png\") plt.savefig(jaccard_path, transparent=transparent) plt.close()", "----- Can use mean or median to fix cluster centroid", "dtype=np.int_ ) fig, ax = plt.subplots(figsize=figsize) plt.title(\"Overlap between {} and", "(n_clusters + 1) * 10]) y_lower = 10 for k", "ax1.set_yticks([]) # Clear the yaxis labels / ticks ax1.set_xticks([-0.1, 0,", "points according to the centroids labels X = np.vstack([X[y ==", "plots with their cluster numbers at the # middle ax1.text(-0.05,", "# cluster i, and sort them ith_cluster_silhouette_values = np.sort(silhouette_values[cluster_labels ==", "X, y, n_clusters, figures_dir, transparent=False, cluster_names=None, title=\"\" ): \"\"\"Clustering assignments", "def overlap_jaccard( indx, y_a, y_b, names_a, names_b, n_a=None, n_b=None, figsize=None,", "the values ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\") ax1.set_yticks([]) # Clear the yaxis", "for i in range(n_a)] b_sets = [set(indx[y_b == i]) for", "(\"Silhouette analysis for KMeans \" \"with n_clusters = %d\" %", "seaborn. palette = np.array(sns.color_palette(\"hls\", n_clusters)) fig, ax = plt.subplots(figsize=big_square) #", "clusters\".format(alabel, blabel)) sns.heatmap( inter_sets, annot=True, fmt=\"6.0f\", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b,", "ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i color = cm.spectral(float(k) /", "10 for k in range(n_clusters): # Aggregate the silhouette scores", "visualization of the clustered data.\") ax2.set_xlabel(\"Feature space for the 1st", "matplotlib import cm from ..utils.base import jaccard def plot_silhouette( X,", "= cm.spectral(cluster_labels.astype(float) / n_clusters) # colors = y ax2.scatter(X[:, 0],", "jaccard score. 
# TODO: Normaliser par len(sd_set) ou len(diet_set) ?", "plt.title(\"Jaccard scores between {} and {} clusters\".format(alabel, blabel)) sns.heatmap( jac_arr,", "plot_cluster_assignments( X, y, n_clusters, figures_dir, transparent=False, cluster_names=None, title=\"\" ): \"\"\"Clustering", "[\"Cluster {}\".format(i + 1) for i in range(n_clusters)] # We", "X[y == i].shape) ) continue xtext, ytext = np.median(samples, axis=0)", "np.median(samples, axis=0) name = fill(cluster_names[i], width=20) assert np.isfinite(xtext) assert np.isfinite(ytext)", "np.isfinite(ytext) txt = ax.text(xtext, ytext, name, fontsize=20, wrap=True, ha=\"left\") txt.set_path_effects(", "names_a, names_b, n_a=None, n_b=None, figsize=None, output_dir=None, alabel=\"socio-demographic\", blabel=\"purchases\", transparent=False, ):", "# Aggregate the silhouette scores for samples belonging to #", "ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\") ax1.set_yticks([]) # Clear the yaxis labels /", "ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() inter_path = join(output_dir, \"Clusters", "y_a, y_b, names_a, names_b, n_a=None, n_b=None, figsize=None, output_dir=None, alabel=\"socio-demographic\", blabel=\"purchases\",", "the various clusters.\") ax1.set_xlabel(\"The silhouette coefficient values\") ax1.set_ylabel(\"Cluster label\") #", "of each label. samples = np.atleast_2d(X[y == i, :2]) if", "feature\") ax2.set_ylabel(\"Feature space for the 2nd feature\") plt.suptitle( (\"Silhouette analysis", "= np.asarray( [[jaccard(set_a, set_b) for set_a in a_sets] for set_b", "matplotlib.patheffects as PathEffects import matplotlib.pyplot as plt import numpy as", "figsize=None, output_dir=None, alabel=\"socio-demographic\", blabel=\"purchases\", transparent=False, ): \"\"\"Compute and plot contingency", "for KMeans \" \"with n_clusters = %d\" % n_clusters), fontsize=14,", "): # Create a subplot with 1 row and 2", "colors = cm.spectral(cluster_labels.astype(float) / n_clusters) # colors = y ax2.scatter(X[:,", "name = fill(cluster_names[i], width=20) assert np.isfinite(xtext) assert np.isfinite(ytext) txt =", "plt.clf() def overlap_jaccard( indx, y_a, y_b, names_a, names_b, n_a=None, n_b=None,", "title=\"\" ): \"\"\"Clustering assignments scatter plot Notes ----- Can use", "blabel=\"purchases\", transparent=False, ): \"\"\"Compute and plot contingency tables based on", "# mask = y == i # ax.scatter(X[mask, 0], X[mask,", "): \"\"\"Clustering assignments scatter plot Notes ----- Can use mean", "try: fig.savefig(figure_fp, transparent=transparent) except ValueError: logging.warning(traceback.format_exc()) finally: plt.close() plt.clf() def", "from textwrap import fill import matplotlib.patheffects as PathEffects import matplotlib.pyplot", "the actual clusters formed colors = cm.spectral(cluster_labels.astype(float) / n_clusters) #", "of the clustered data.\") ax2.set_xlabel(\"Feature space for the 1st feature\")", "set intersection and jaccard score. 
# TODO: Normaliser par len(sd_set)", "c=palette[i], # label=cluster_names[i]) ax.set_title(title) ax.scatter(X[:, 0], X[:, 1], lw=0, s=20,", "== i]) for i in range(n_a)] b_sets = [set(indx[y_b ==", "assignments scatter plot Notes ----- Can use mean or median", "np.asarray( [[jaccard(set_a, set_b) for set_a in a_sets] for set_b in", "samples = np.atleast_2d(X[y == i, :2]) if not len(samples): logging.warning(", "== i].shape) ) continue xtext, ytext = np.median(samples, axis=0) name", "default_alpha from matplotlib import cm from ..utils.base import jaccard def", "labels X = np.vstack([X[y == i] for i in range(n_clusters)])", "n_clusters, figures_dir, transparent=False, cluster_names=None, title=\"\" ): \"\"\"Clustering assignments scatter plot", "cluster_names=None, title=\"\" ): \"\"\"Clustering assignments scatter plot Notes ----- Can", "makedirs(output_dir) else: assert n_a and n_b assert len(indx) == len(y_a)", "1]) # Construct cluster # 2nd Plot showing the actual", "import matplotlib.pyplot as plt import numpy as np import seaborn", "transparent=transparent) plt.close() plt.clf() jac_arr = np.asarray( [[jaccard(set_a, set_b) for set_a", "in range(n_clusters)]) y = np.hstack([y[y == i] for i in", "elif output_dir and not exists(output_dir): makedirs(output_dir) else: assert n_a and", "np.isfinite(xtext) assert np.isfinite(ytext) txt = ax.text(xtext, ytext, name, fontsize=20, wrap=True,", "use mean or median to fix cluster centroid coordinates.\"\"\" if", "0.4, 0.6, 0.8, 1]) # Construct cluster # 2nd Plot", "1) for i in range(n_clusters)] # We first reorder the", "= y_upper + 10 # 10 for the 0 samples", "their cluster numbers at the # middle ax1.text(-0.05, y_lower +", "join(output_dir, \"Clusters Intersection.png\") plt.savefig(inter_path, transparent=transparent) plt.close() plt.clf() jac_arr = np.asarray(", "plot y_lower = y_upper + 10 # 10 for the", "is None: cluster_names = [\"Cluster {}\".format(i + 1) for i", "1]) # The (n_clusters+1)*10 is for inserting blank space between", "inter_sets, annot=True, fmt=\"6.0f\", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() inter_path", "singular cluster {} (shape:{})\".format(i + 1, X[y == i].shape) )", "plt.clf() jac_arr = np.asarray( [[jaccard(set_a, set_b) for set_a in a_sets]", "blank space between silhouette # plots of individual clusters, to", "in range(n_a)] b_sets = [set(indx[y_b == i]) for i in", "plt.subplots(1, 2, figsize=(26, 10)) # The 1st subplot is the", "c=palette[y.astype(np.int)]) ax.axis(\"off\") # Add the labels for each cluster. 
for", "{} clusters\".format(alabel, blabel)) sns.heatmap( jac_arr, annot=True, fmt=\".3f\", ax=ax, square=True, xticklabels=names_a,", "textwrap import fill import matplotlib.patheffects as PathEffects import matplotlib.pyplot as", "plt.savefig(figure_fp) plt.close() plt.clf() def plot_cluster_assignments( X, y, n_clusters, figures_dir, transparent=False,", "set_b in b_sets], dtype=np.float_, ) fig, ax = plt.subplots(figsize=figsize) plt.title(\"Jaccard", "ax2.set_title(\"The visualization of the clustered data.\") ax2.set_xlabel(\"Feature space for the", "silhouette score of all the values ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\") ax1.set_yticks([])", "for the 0 samples ax1.set_title(\"The silhouette plot for the various", "import logging import traceback from os import makedirs from os.path", "{} and {} clusters\".format(alabel, blabel)) sns.heatmap( jac_arr, annot=True, fmt=\".3f\", ax=ax,", "vertical line for average silhouette score of all the values", "== k]) size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i", "based on set intersection and jaccard score. # TODO: Normaliser", "* 10]) y_lower = 10 for k in range(n_clusters): #", "clusters.\") ax1.set_xlabel(\"The silhouette coefficient values\") ax1.set_ylabel(\"Cluster label\") # The vertical", "lie within [-0.1, 1] ax1.set_xlim([-0.1, 1]) # The (n_clusters+1)*10 is", "labels / ticks ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])", "plt.suptitle( (\"Silhouette analysis for KMeans \" \"with n_clusters = %d\"", "logging.warning( \"Probably singular cluster {} (shape:{})\".format(i + 1, X[y ==", "None: cluster_names = [\"Cluster {}\".format(i + 1) for i in", "and plot contingency tables based on set intersection and jaccard", "for next plot y_lower = y_upper + 10 # 10", "n_b assert len(indx) == len(y_a) == len(y_b) assert len(names_a) ==", "transparent=False, cluster_names=None, title=\"\" ): \"\"\"Clustering assignments scatter plot Notes -----", "# TODO: Normaliser par len(sd_set) ou len(diet_set) ? \"\"\" if", "xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() inter_path = join(output_dir, \"Clusters Intersection.png\") plt.savefig(inter_path,", "KMeans \" \"with n_clusters = %d\" % n_clusters), fontsize=14, fontweight=\"bold\",", "fmt=\".3f\", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() jaccard_path = join(output_dir,", "plt.close() plt.clf() jac_arr = np.asarray( [[jaccard(set_a, set_b) for set_a in", "showing the actual clusters formed colors = cm.spectral(cluster_labels.astype(float) / n_clusters)", "mean or median to fix cluster centroid coordinates.\"\"\" if cluster_names", "figure_fp = join(figures_dir, \"Clustered {}.png\".format(title)) fig.tight_layout() try: fig.savefig(figure_fp, transparent=transparent) except", "set_t) for set_a in a_sets] for set_t in b_sets], dtype=np.int_", "import cm from ..utils.base import jaccard def plot_silhouette( X, figure_fp,", "0, 0.2, 0.4, 0.6, 0.8, 1]) # Construct cluster #", "koino.plot import big_square, default_alpha from matplotlib import cm from ..utils.base", "plt import numpy as np import seaborn as sns from", "clearly. ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10]) y_lower", "X[:, 1], lw=0, s=20, c=palette[y.astype(np.int)]) ax.axis(\"off\") # Add the labels", "cluster # 2nd Plot showing the actual clusters formed colors", "TODO: Normaliser par len(sd_set) ou len(diet_set) ? 
\"\"\" if not", "Aggregate the silhouette scores for samples belonging to # cluster", "blabel)) sns.heatmap( jac_arr, annot=True, fmt=\".3f\", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, )", "= np.sort(silhouette_values[cluster_labels == k]) size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower", "for average silhouette score of all the values ax1.axvline(x=silhouette_avg, color=\"red\",", "except ValueError: logging.warning(traceback.format_exc()) finally: plt.close() plt.clf() def overlap_jaccard( indx, y_a,", "1], lw=0, s=20, c=palette[i], # label=cluster_names[i]) ax.set_title(title) ax.scatter(X[:, 0], X[:,", "in a_sets] for set_b in b_sets], dtype=np.float_, ) fig, ax", "analysis for KMeans \" \"with n_clusters = %d\" % n_clusters),", "%d\" % n_clusters), fontsize=14, fontweight=\"bold\", ) plt.savefig(figure_fp) plt.close() plt.clf() def", "1] ax1.set_xlim([-0.1, 1]) # The (n_clusters+1)*10 is for inserting blank", "exists, join from textwrap import fill import matplotlib.patheffects as PathEffects", "a subplot with 1 row and 2 columns fig, (ax1,", "row and 2 columns fig, (ax1, ax2) = plt.subplots(1, 2,", "+ (n_clusters + 1) * 10]) y_lower = 10 for", "fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(26, 10)) # The", "score of all the values ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\") ax1.set_yticks([]) #", "figsize=(26, 10)) # The 1st subplot is the silhouette plot", ") fig, ax = plt.subplots(figsize=figsize) plt.title(\"Jaccard scores between {} and", "= 10 for k in range(n_clusters): # Aggregate the silhouette", "1) * 10]) y_lower = 10 for k in range(n_clusters):", "Add the labels for each cluster. for i in range(n_clusters):", "y_lower + size_cluster_i color = cm.spectral(float(k) / n_clusters) ax1.fill_betweenx( np.arange(y_lower,", "len(diet_set) ? 
\"\"\" if not (n_a or n_b) or not", "centroids labels X = np.vstack([X[y == i] for i in", "[set(indx[y_a == i]) for i in range(n_a)] b_sets = [set(indx[y_b", "inter_path = join(output_dir, \"Clusters Intersection.png\") plt.savefig(inter_path, transparent=transparent) plt.close() plt.clf() jac_arr", "[set(indx[y_b == i]) for i in range(n_b)] inter_sets = np.asarray(", "size_cluster_i color = cm.spectral(float(k) / n_clusters) ax1.fill_betweenx( np.arange(y_lower, y_upper), 0,", "marker=\".\", s=20, lw=0, alpha=default_alpha, c=colors) ax2.set_title(\"The visualization of the clustered", "= np.hstack([y[y == i] for i in range(n_clusters)]) # Choose", "== len(y_a) == len(y_b) assert len(names_a) == n_a assert len(names_b)", "cluster_names = [\"Cluster {}\".format(i + 1) for i in range(n_clusters)]", "at the # middle ax1.text(-0.05, y_lower + 0.5 * size_cluster_i,", "txt = ax.text(xtext, ytext, name, fontsize=20, wrap=True, ha=\"left\") txt.set_path_effects( [PathEffects.Stroke(linewidth=5,", "output_dir: return elif output_dir and not exists(output_dir): makedirs(output_dir) else: assert", "n_clusters), fontsize=14, fontweight=\"bold\", ) plt.savefig(figure_fp) plt.close() plt.clf() def plot_cluster_assignments( X,", "ax2) = plt.subplots(1, 2, figsize=(26, 10)) # The 1st subplot", "str(k)) # Compute the new y_lower for next plot y_lower", "in b_sets], dtype=np.float_, ) fig, ax = plt.subplots(figsize=figsize) plt.title(\"Jaccard scores", "cm.spectral(float(k) / n_clusters) ax1.fill_betweenx( np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color,", "for the 1st feature\") ax2.set_ylabel(\"Feature space for the 2nd feature\")", "between {} and {} clusters\".format(alabel, blabel)) sns.heatmap( jac_arr, annot=True, fmt=\".3f\",", "% n_clusters), fontsize=14, fontweight=\"bold\", ) plt.savefig(figure_fp) plt.close() plt.clf() def plot_cluster_assignments(", "[PathEffects.Stroke(linewidth=5, foreground=\"w\"), PathEffects.Normal()] ) # plt.legend() figure_fp = join(figures_dir, \"Clustered", "and jaccard score. 
# TODO: Normaliser par len(sd_set) ou len(diet_set)", "middle ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(k)) # Compute", "n_clusters = %d\" % n_clusters), fontsize=14, fontweight=\"bold\", ) plt.savefig(figure_fp) plt.close()", "# middle ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(k)) #", "space for the 1st feature\") ax2.set_ylabel(\"Feature space for the 2nd", "[[len(set_a & set_t) for set_a in a_sets] for set_t in", "# Create a subplot with 1 row and 2 columns", "We first reorder the data points according to the centroids", "samples ax1.set_title(\"The silhouette plot for the various clusters.\") ax1.set_xlabel(\"The silhouette", "exists(output_dir): makedirs(output_dir) else: assert n_a and n_b assert len(indx) ==", "in b_sets], dtype=np.int_ ) fig, ax = plt.subplots(figsize=figsize) plt.title(\"Overlap between", "actual clusters formed colors = cm.spectral(cluster_labels.astype(float) / n_clusters) # colors", "i in range(n_clusters)] # We first reorder the data points", "\"with n_clusters = %d\" % n_clusters), fontsize=14, fontweight=\"bold\", ) plt.savefig(figure_fp)", "# Compute the new y_lower for next plot y_lower =", "X[:, 1], marker=\".\", s=20, lw=0, alpha=default_alpha, c=colors) ax2.set_title(\"The visualization of", "n_clusters, silhouette_values, cluster_labels, silhouette_avg ): # Create a subplot with", "scores between {} and {} clusters\".format(alabel, blabel)) sns.heatmap( jac_arr, annot=True,", "= np.asarray( [[len(set_a & set_t) for set_a in a_sets] for", "new y_lower for next plot y_lower = y_upper + 10", "and {} clusters\".format(alabel, blabel)) sns.heatmap( inter_sets, annot=True, fmt=\"6.0f\", ax=ax, square=True,", "in range(n_clusters): # mask = y == i # ax.scatter(X[mask,", "# Label the silhouette plots with their cluster numbers at", "clusters formed colors = cm.spectral(cluster_labels.astype(float) / n_clusters) # colors =", "all the values ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\") ax1.set_yticks([]) # Clear the", "of all the values ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\") ax1.set_yticks([]) # Clear", "matplotlib.pyplot as plt import numpy as np import seaborn as", "0], X[:, 1], marker=\".\", s=20, lw=0, alpha=default_alpha, c=colors) ax2.set_title(\"The visualization", "Compute the new y_lower for next plot y_lower = y_upper", "silhouette plot for the various clusters.\") ax1.set_xlabel(\"The silhouette coefficient values\")", "n_clusters)) fig, ax = plt.subplots(figsize=big_square) # for i in range(n_clusters):", "The silhouette coefficient can range from -1, 1 but here", "Plot showing the actual clusters formed colors = cm.spectral(cluster_labels.astype(float) /", "for i in range(n_clusters)]) # Choose a color palette with", "fmt=\"6.0f\", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() inter_path = join(output_dir,", "n_a and n_b assert len(indx) == len(y_a) == len(y_b) assert", "next plot y_lower = y_upper + 10 # 10 for", "and sort them ith_cluster_silhouette_values = np.sort(silhouette_values[cluster_labels == k]) size_cluster_i =", "fix cluster centroid coordinates.\"\"\" if cluster_names is None: cluster_names =", "scatter plot Notes ----- Can use mean or median to", "mask = y == i # ax.scatter(X[mask, 0], X[mask, 1],", "assert len(indx) == len(y_a) == len(y_b) assert len(names_a) == n_a", "samples belonging to # cluster i, and sort them ith_cluster_silhouette_values", "ax2.scatter(X[:, 0], X[:, 1], marker=\".\", s=20, lw=0, alpha=default_alpha, c=colors) 
ax2.set_title(\"The", "from matplotlib import cm from ..utils.base import jaccard def plot_silhouette(", "== i] for i in range(n_clusters)]) # Choose a color", "np.vstack([X[y == i] for i in range(n_clusters)]) y = np.hstack([y[y", "ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10]) y_lower =", "names_b, n_a=None, n_b=None, figsize=None, output_dir=None, alabel=\"socio-demographic\", blabel=\"purchases\", transparent=False, ): \"\"\"Compute", "logging.warning(traceback.format_exc()) finally: plt.close() plt.clf() def overlap_jaccard( indx, y_a, y_b, names_a,", "\"\"\"Compute and plot contingency tables based on set intersection and", "Construct cluster # 2nd Plot showing the actual clusters formed", "join from textwrap import fill import matplotlib.patheffects as PathEffects import", "between {} and {} clusters\".format(alabel, blabel)) sns.heatmap( inter_sets, annot=True, fmt=\"6.0f\",", "as PathEffects import matplotlib.pyplot as plt import numpy as np", "name, fontsize=20, wrap=True, ha=\"left\") txt.set_path_effects( [PathEffects.Stroke(linewidth=5, foreground=\"w\"), PathEffects.Normal()] ) #", "\"Clusters Intersection.png\") plt.savefig(inter_path, transparent=transparent) plt.close() plt.clf() jac_arr = np.asarray( [[jaccard(set_a,", "= plt.subplots(figsize=figsize) plt.title(\"Overlap between {} and {} clusters\".format(alabel, blabel)) sns.heatmap(", "-1, 1 but here all # lie within [-0.1, 1]", "Can use mean or median to fix cluster centroid coordinates.\"\"\"", "intersection and jaccard score. # TODO: Normaliser par len(sd_set) ou", "the new y_lower for next plot y_lower = y_upper +", "ax = plt.subplots(figsize=figsize) plt.title(\"Jaccard scores between {} and {} clusters\".format(alabel,", "alpha=default_alpha, c=colors) ax2.set_title(\"The visualization of the clustered data.\") ax2.set_xlabel(\"Feature space", "i in range(n_clusters)]) # Choose a color palette with seaborn.", "= np.median(samples, axis=0) name = fill(cluster_names[i], width=20) assert np.isfinite(xtext) assert", "in range(n_clusters)]) # Choose a color palette with seaborn. palette", "= y == i # ax.scatter(X[mask, 0], X[mask, 1], lw=0,", "jaccard def plot_silhouette( X, figure_fp, n_clusters, silhouette_values, cluster_labels, silhouette_avg ):", "the centroids labels X = np.vstack([X[y == i] for i", "10]) y_lower = 10 for k in range(n_clusters): # Aggregate", "first reorder the data points according to the centroids labels", "y == i # ax.scatter(X[mask, 0], X[mask, 1], lw=0, s=20,", "plt.close() plt.clf() def overlap_jaccard( indx, y_a, y_b, names_a, names_b, n_a=None,", "labels for each cluster. 
for i in range(n_clusters): # Position", "# 2nd Plot showing the actual clusters formed colors =", "fig, ax = plt.subplots(figsize=figsize) plt.title(\"Jaccard scores between {} and {}", "with 1 row and 2 columns fig, (ax1, ax2) =", "os import makedirs from os.path import exists, join from textwrap", "import jaccard def plot_silhouette( X, figure_fp, n_clusters, silhouette_values, cluster_labels, silhouette_avg", "the data points according to the centroids labels X =", "ax2.set_ylabel(\"Feature space for the 2nd feature\") plt.suptitle( (\"Silhouette analysis for", "return elif output_dir and not exists(output_dir): makedirs(output_dir) else: assert n_a", "for k in range(n_clusters): # Aggregate the silhouette scores for", "== n_a assert len(names_b) == n_b a_sets = [set(indx[y_a ==", "with their cluster numbers at the # middle ax1.text(-0.05, y_lower", "for set_a in a_sets] for set_b in b_sets], dtype=np.float_, )", "dtype=np.float_, ) fig, ax = plt.subplots(figsize=figsize) plt.title(\"Jaccard scores between {}", "annot=True, fmt=\"6.0f\", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() inter_path =", "xtext, ytext = np.median(samples, axis=0) name = fill(cluster_names[i], width=20) assert", "cluster i, and sort them ith_cluster_silhouette_values = np.sort(silhouette_values[cluster_labels == k])", "y ax2.scatter(X[:, 0], X[:, 1], marker=\".\", s=20, lw=0, alpha=default_alpha, c=colors)", "= plt.subplots(figsize=big_square) # for i in range(n_clusters): # mask =", "2nd feature\") plt.suptitle( (\"Silhouette analysis for KMeans \" \"with n_clusters", "yaxis labels / ticks ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8,", "len(names_b) == n_b a_sets = [set(indx[y_a == i]) for i", "belonging to # cluster i, and sort them ith_cluster_silhouette_values =", "n_b) or not output_dir: return elif output_dir and not exists(output_dir):", "ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(k)) # Compute the", "import numpy as np import seaborn as sns from koino.plot", "color palette with seaborn. palette = np.array(sns.color_palette(\"hls\", n_clusters)) fig, ax", "output_dir=None, alabel=\"socio-demographic\", blabel=\"purchases\", transparent=False, ): \"\"\"Compute and plot contingency tables", "facecolor=color, edgecolor=color, alpha=default_alpha, ) # Label the silhouette plots with", "import big_square, default_alpha from matplotlib import cm from ..utils.base import", "n_clusters) # colors = y ax2.scatter(X[:, 0], X[:, 1], marker=\".\",", "to the centroids labels X = np.vstack([X[y == i] for", "range(n_clusters): # mask = y == i # ax.scatter(X[mask, 0],", "\" \"with n_clusters = %d\" % n_clusters), fontsize=14, fontweight=\"bold\", )", "the silhouette plot # The silhouette coefficient can range from", "for inserting blank space between silhouette # plots of individual", "each label. 
samples = np.atleast_2d(X[y == i, :2]) if not", "assert n_a and n_b assert len(indx) == len(y_a) == len(y_b)", "size_cluster_i, str(k)) # Compute the new y_lower for next plot", "silhouette coefficient values\") ax1.set_ylabel(\"Cluster label\") # The vertical line for", "and {} clusters\".format(alabel, blabel)) sns.heatmap( jac_arr, annot=True, fmt=\".3f\", ax=ax, square=True,", "= np.vstack([X[y == i] for i in range(n_clusters)]) y =", "# plt.legend() figure_fp = join(figures_dir, \"Clustered {}.png\".format(title)) fig.tight_layout() try: fig.savefig(figure_fp,", "transparent=transparent) except ValueError: logging.warning(traceback.format_exc()) finally: plt.close() plt.clf() def overlap_jaccard( indx,", "numbers at the # middle ax1.text(-0.05, y_lower + 0.5 *", "Label the silhouette plots with their cluster numbers at the", "1st feature\") ax2.set_ylabel(\"Feature space for the 2nd feature\") plt.suptitle( (\"Silhouette", "fig.tight_layout() try: fig.savefig(figure_fp, transparent=transparent) except ValueError: logging.warning(traceback.format_exc()) finally: plt.close() plt.clf()", "label=cluster_names[i]) ax.set_title(title) ax.scatter(X[:, 0], X[:, 1], lw=0, s=20, c=palette[y.astype(np.int)]) ax.axis(\"off\")", "/ n_clusters) ax1.fill_betweenx( np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=default_alpha,", "cluster. for i in range(n_clusters): # Position of each label.", "# 10 for the 0 samples ax1.set_title(\"The silhouette plot for", "= join(figures_dir, \"Clustered {}.png\".format(title)) fig.tight_layout() try: fig.savefig(figure_fp, transparent=transparent) except ValueError:", "np.atleast_2d(X[y == i, :2]) if not len(samples): logging.warning( \"Probably singular", "lw=0, s=20, c=palette[y.astype(np.int)]) ax.axis(\"off\") # Add the labels for each", "plots of individual clusters, to demarcate them clearly. 
ax1.set_ylim([0, len(X)", "\"Probably singular cluster {} (shape:{})\".format(i + 1, X[y == i].shape)", "ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=default_alpha, ) # Label the silhouette plots", "plot contingency tables based on set intersection and jaccard score.", "for samples belonging to # cluster i, and sort them", "): \"\"\"Compute and plot contingency tables based on set intersection", "= y_lower + size_cluster_i color = cm.spectral(float(k) / n_clusters) ax1.fill_betweenx(", "Intersection.png\") plt.savefig(inter_path, transparent=transparent) plt.close() plt.clf() jac_arr = np.asarray( [[jaccard(set_a, set_b)", "the # middle ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(k))", "clusters\".format(alabel, blabel)) sns.heatmap( jac_arr, annot=True, fmt=\".3f\", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b,", "for set_b in b_sets], dtype=np.float_, ) fig, ax = plt.subplots(figsize=figsize)", "is for inserting blank space between silhouette # plots of", "plot_silhouette( X, figure_fp, n_clusters, silhouette_values, cluster_labels, silhouette_avg ): # Create", "n_a=None, n_b=None, figsize=None, output_dir=None, alabel=\"socio-demographic\", blabel=\"purchases\", transparent=False, ): \"\"\"Compute and", "# Construct cluster # 2nd Plot showing the actual clusters", "set_a in a_sets] for set_t in b_sets], dtype=np.int_ ) fig,", "i in range(n_b)] inter_sets = np.asarray( [[len(set_a & set_t) for", "plt.tight_layout() jaccard_path = join(output_dir, \"Clusters Jaccard.png\") plt.savefig(jaccard_path, transparent=transparent) plt.close() plt.clf()", "= [\"Cluster {}\".format(i + 1) for i in range(n_clusters)] #", "can range from -1, 1 but here all # lie", "[-0.1, 1] ax1.set_xlim([-0.1, 1]) # The (n_clusters+1)*10 is for inserting", "y_lower = 10 for k in range(n_clusters): # Aggregate the", "width=20) assert np.isfinite(xtext) assert np.isfinite(ytext) txt = ax.text(xtext, ytext, name,", "to # cluster i, and sort them ith_cluster_silhouette_values = np.sort(silhouette_values[cluster_labels", "s=20, c=palette[i], # label=cluster_names[i]) ax.set_title(title) ax.scatter(X[:, 0], X[:, 1], lw=0,", "fontsize=20, wrap=True, ha=\"left\") txt.set_path_effects( [PathEffects.Stroke(linewidth=5, foreground=\"w\"), PathEffects.Normal()] ) # plt.legend()", "== i] for i in range(n_clusters)]) y = np.hstack([y[y ==", "import makedirs from os.path import exists, join from textwrap import", "plt.legend() figure_fp = join(figures_dir, \"Clustered {}.png\".format(title)) fig.tight_layout() try: fig.savefig(figure_fp, transparent=transparent)", "for i in range(n_b)] inter_sets = np.asarray( [[len(set_a & set_t)", "/ n_clusters) # colors = y ax2.scatter(X[:, 0], X[:, 1],", "ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1]) # Construct cluster", "fig, ax = plt.subplots(figsize=big_square) # for i in range(n_clusters): #", "# The 1st subplot is the silhouette plot # The", ") # plt.legend() figure_fp = join(figures_dir, \"Clustered {}.png\".format(title)) fig.tight_layout() try:", "silhouette coefficient can range from -1, 1 but here all", "ax1.set_title(\"The silhouette plot for the various clusters.\") ax1.set_xlabel(\"The silhouette coefficient", "y, n_clusters, figures_dir, transparent=False, cluster_names=None, title=\"\" ): \"\"\"Clustering assignments scatter", "for each cluster. for i in range(n_clusters): # Position of", "Position of each label. samples = np.atleast_2d(X[y == i, :2])", "score. # TODO: Normaliser par len(sd_set) ou len(diet_set) ? 
\"\"\"", "plot Notes ----- Can use mean or median to fix", "set_t in b_sets], dtype=np.int_ ) fig, ax = plt.subplots(figsize=figsize) plt.title(\"Overlap", "of individual clusters, to demarcate them clearly. ax1.set_ylim([0, len(X) +", "# We first reorder the data points according to the", "= y ax2.scatter(X[:, 0], X[:, 1], marker=\".\", s=20, lw=0, alpha=default_alpha,", "0.8, 1]) # Construct cluster # 2nd Plot showing the", "len(y_b) assert len(names_a) == n_a assert len(names_b) == n_b a_sets", "silhouette plots with their cluster numbers at the # middle", "ax = plt.subplots(figsize=big_square) # for i in range(n_clusters): # mask", "median to fix cluster centroid coordinates.\"\"\" if cluster_names is None:", "# The silhouette coefficient can range from -1, 1 but", "plt.subplots(figsize=big_square) # for i in range(n_clusters): # mask = y", "= %d\" % n_clusters), fontsize=14, fontweight=\"bold\", ) plt.savefig(figure_fp) plt.close() plt.clf()", "ou len(diet_set) ? \"\"\" if not (n_a or n_b) or", "silhouette_values, cluster_labels, silhouette_avg ): # Create a subplot with 1", "line for average silhouette score of all the values ax1.axvline(x=silhouette_avg,", "* size_cluster_i, str(k)) # Compute the new y_lower for next", "big_square, default_alpha from matplotlib import cm from ..utils.base import jaccard", "ax2.set_xlabel(\"Feature space for the 1st feature\") ax2.set_ylabel(\"Feature space for the", "overlap_jaccard( indx, y_a, y_b, names_a, names_b, n_a=None, n_b=None, figsize=None, output_dir=None,", "various clusters.\") ax1.set_xlabel(\"The silhouette coefficient values\") ax1.set_ylabel(\"Cluster label\") # The", "the yaxis labels / ticks ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6,", "# for i in range(n_clusters): # mask = y ==", "xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() jaccard_path = join(output_dir, \"Clusters Jaccard.png\") plt.savefig(jaccard_path,", "subplot is the silhouette plot # The silhouette coefficient can", "# lie within [-0.1, 1] ax1.set_xlim([-0.1, 1]) # The (n_clusters+1)*10", "cluster numbers at the # middle ax1.text(-0.05, y_lower + 0.5", "np import seaborn as sns from koino.plot import big_square, default_alpha", "silhouette # plots of individual clusters, to demarcate them clearly." ]
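A minimal driver for the silhouette helper above, assuming the per-sample silhouette values come from scikit-learn; KMeans, silhouette_samples and silhouette_score are assumptions of this sketch and are not part of the excerpt.

import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score

def silhouette_report(X, n_clusters, figure_fp):
    # Cluster the data, compute per-sample and average silhouette values,
    # then hand them to the plot_silhouette helper defined above.
    cluster_labels = KMeans(n_clusters=n_clusters, random_state=0).fit_predict(X)
    silhouette_values = silhouette_samples(X, cluster_labels)
    silhouette_avg = silhouette_score(X, cluster_labels)
    plot_silhouette(X, figure_fp, n_clusters, silhouette_values,
                    cluster_labels, silhouette_avg)

The argument order follows the plot_silhouette signature recovered above.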
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Repository: jack139/cnnc
#
import web
import time
from bson.objectid import ObjectId
from config import setting
import helper

db = setting.db_web

# Delete chat rule (original comment: 删除聊天规则)
url = ('/plat/index_news_remove')

class handler:
    def GET(self):
        if not helper.logged(helper.PRIV_USER, 'TALKBOT'):
            raise web.seeother('/')
        render = helper.create_render()
        user_data = web.input(news_id='')
        if user_data.news_id == '':
            return render.info('参数错误!')  # "Invalid parameter!"
        db.index_news.delete_one({'_id': ObjectId(user_data.news_id)})
        return render.info('成功删除!', '/plat/index_news')  # "Deleted successfully!"
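For context, a minimal sketch of how a web.py handler module like the one above is usually mounted into an application. The URL table below is illustrative only; the repository presumably aggregates each module's url/handler pair in its own bootstrap code.

import web

# Map the URL pattern to the handler class defined in this module's namespace.
urls = (
    '/plat/index_news_remove', 'handler',
)
app = web.application(urls, globals())

if __name__ == '__main__':
    app.run()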
#!/usr/bin/env python
# encoding: utf-8
# Source file: esppy/windows/score.py
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function, division, absolute_import, unicode_literals

import os
import pandas as pd
import six
from .base import BaseWindow, attribute
from .features import SchemaFeature, ModelsFeature, ConnectorsFeature
from .utils import get_args, ensure_element


class ScoreWindow(BaseWindow, SchemaFeature, ModelsFeature, ConnectorsFeature):
    '''
    Score window

    Parameters
    ----------
    name : string, optional
        The name of the window
    schema : Schema, optional
        The schema of the window
    pubsub : bool, optional
        Publish/subscribe mode for the window. When the project-level value
        of pubsub is manual, true enables publishing and subscribing for the
        window and false disables it.
    description : string, optional
        Description of the window

    Attributes
    ----------
    online_models : list-of-OnlineModels
        List of online model objects
    offline_models : list-of-OfflineModels
        List of offline model objects

    Returns
    -------
    :class:`ScoreWindow`

    '''

    window_type = 'score'

    def __init__(self, name=None, schema=None, pubsub=None, description=None,
                 copyvars=None):
        BaseWindow.__init__(self, **get_args(locals()))
        # Set the online model for subclasses
        if type(self).__name__ != 'ScoreWindow':
            self.add_online_model(type(self).__name__)

    def _create_schema_list(self, variables):
        '''
        Extract schema information from DataFrame

        Parameters
        ----------
        variables : DataFrame
            The DataFrame containing schema information

        Returns
        -------
        list

        '''
        labels = []
        labels.append('id*:int64')
        for name, dtype in zip(variables['Name'], variables['Type']):
            if dtype == 'Num':
                labels.append(name + ':double')
            elif dtype == 'Char':
                labels.append(name + ':string')
        return labels

    def import_schema_from_astore_output(self, output_variables_input):
        '''
        Import a schema from the astore CAS action output format

        Parameters
        ----------
        output_variables_input : DataFrame or list or string
            The schema definition

        '''
        if isinstance(output_variables_input, six.string_types):
            if os.path.isfile(output_variables_input):
                output_variables_input = pd.read_csv(output_variables_input)
            else:
                output_variables_input = pd.read_csv(six.StringIO(output_variables_input))
        if isinstance(output_variables_input, pd.DataFrame):
            self.schema = self._create_schema_list(output_variables_input)
        elif isinstance(output_variables_input, (tuple, list)):
            self.schema = list(output_variables_input)
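A hedged usage sketch for the class above. The DataFrame mimics the astore CAS action's output-variables table (columns 'Name' and 'Type'), which is the shape import_schema_from_astore_output expects; the import path and variable names are illustrative assumptions, not taken from the source.

import pandas as pd
from esppy.windows import ScoreWindow   # assumed public import path

# Two output variables in astore format: one numeric, one character.
output_vars = pd.DataFrame({
    'Name': ['P_Species', 'I_Species'],
    'Type': ['Num', 'Char'],
})

win = ScoreWindow(name='score_w')
win.import_schema_from_astore_output(output_vars)
# Resulting schema, per _create_schema_list above:
# ['id*:int64', 'P_Species:double', 'I_Species:string']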
[ "time has exceeded.') wait_regex = re.search(r'\\d+', res.json()['message']) if wait_regex: wait_amount", "%s' % (res.status_code, res.reason)) return None return res.json() def html_description_to_human_readable(breach_description):", "if context_type == 'email': context_dict['Address'] = context_main_value else: context_dict['Name'] =", "2 if demisto.params().get('default_dbot_score_email') == 'SUSPICIOUS' else 3 DEFAULT_DBOT_SCORE_DOMAIN = 2", "'ID': paste_breach['Id'], 'Date': '', 'Amount of emails in paste': str(paste_breach['EmailCount'])", "command = demisto.command() LOG('Command being called is: {}'.format(command)) try: handle_proxy()", "'Verified' if breach['IsVerified'] else 'Unverified' md += '#### ' +", "pastes_list.append(paste_entry) md += tableToMarkdown('The email address was found in the", "tableToMarkdown('The email address was found in the following \"Pastes\":', pastes_list,", "create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN) comp_domain[outputPaths['domain']] = domain_context comp_domain['DBotScore'] =", "RETRIES_END_TIME: return_error('Max retry time has exceeded.') wait_regex = re.search(r'\\d+', res.json()['message'])", "- in this case the domain list is needed :return:", "{ 'Indicator': indicator_value, 'Type': indicator_type, 'Vendor': VENDOR, 'Score': dbot_score }", "'Source': paste_breach['Source'], 'Title': paste_breach['Title'], 'ID': paste_breach['Id'], 'Date': '', 'Amount of", "= create_dbot_score_dictionary(email, 'email', dbot_score) return comp_email def domain_to_entry_context(domain, api_res): comp_sites", "ec_list, api_res_list def pwned_domain(domain_list): \"\"\" Executing the http request :param", "records_found: md += 'No records found' return md def create_dbot_score_dictionary(indicator_value,", "RETRIES_END_TIME = datetime.min ''' HELPER FUNCTIONS ''' def http_request(method, url_suffix,", "args_dict: the demisto argument - in this case the email", "dict() # dict if context_type == 'email': context_dict['Address'] = context_main_value", "email in email_list: email_suffix = SUFFIXES.get(\"email\") + email + SUFFIXES.get(\"email_truncate_verified\")", "0: dbot_score = DEFAULT_DBOT_SCORE_EMAIL email_context = create_context_entry('email', email, comp_sites, comp_pastes,", "not res.status_code == 200: if not res.status_code == 401: demisto.error(", "[], api_paste_res or [])) return md_list, ec_list, api_email_res_list def pwned_email(email_list):", "for the http requests :return: an array of http requests", "'domain': pwned_domain_command, 'pwned-domain': pwned_domain_command, 'pwned-username': pwned_username_command } if command in", "md += html_description_to_human_readable(breach['Description']) + '\\n' md += 'Data breached: **'", "outputs \"\"\" http_request('GET', SUFFIXES.get(\"username\", '') + 'test') return ['ok'], [None],", "api_res in zip(username_list, api_res_list): md_list.append(data_to_markdown('Username', username, api_res)) ec_list.append(domain_to_entry_context(username, api_res or", "*' + query_arg + '*\\n' if api_res: records_found = True", "'Accept': 'application/json' } DEFAULT_DBOT_SCORE_EMAIL = 2 if demisto.params().get('default_dbot_score_email') == 'SUSPICIOUS'", "Pwned Integration [%d]. 
Full text: %s' % (res.status_code, res.text)) return_error('Error", "+= 'Date: **' + breach['BreachDate'] + '**\\n\\n' md += html_description_to_human_readable(breach['Description'])", "'### Have I Been Pwned query for ' + query_type.lower()", "= sorted(comp_sites) comp_domain = dict() # type: dict dbot_score =", "MAX_RETRY_ALLOWED = demisto.params().get('max_retry_time', -1) API_KEY = demisto.params().get('api_key') USE_SSL = not", "def pwned_email_command(args_dict): \"\"\" Executing the pwned request for emails list,", "for md, ec, api_paste_res in zip(md_list, ec_list, api_email_res_list): return_outputs(md, ec,", "'Vendor': VENDOR, 'Description': 'The ' + malicious_type + ' has", "api_res or [])) return md_list, ec_list, api_res_list def pwned_username(username_list): \"\"\"", "IMPORTS ''' import re import requests # Disable insecure warnings", "for the http requests :return: 2 arrays of http requests", "http requests :return: 2 arrays of http requests outputs \"\"\"", "in this case the domain list is needed :return: 3", "global RETRIES_END_TIME if MAX_RETRY_ALLOWED != -1: RETRIES_END_TIME = datetime.now() +", "3 lists of outputs :param args_dict: the demisto argument -", "= argToList(args_dict.get('email', '')) api_email_res_list, api_paste_res_list = pwned_email(email_list) md_list = []", "' + str(breach['PwnCount']) + \\ ' records breached [' +", "paste_breach['Id'], 'Date': '', 'Amount of emails in paste': str(paste_breach['EmailCount']) }", "retry time has exceeded.') wait_regex = re.search(r'\\d+', res.json()['message']) if wait_regex:", "# type: dict comp_sites = sorted([item['Title'] for item in api_email_res])", "of http requests outputs \"\"\" api_email_res_list = [] api_paste_res_list =", "to Pwned Integration [%d]. Full text: %s' % (res.status_code, res.text))", "'SUSPICIOUS' else 3 SUFFIXES = { \"email\": '/breachedaccount/', \"domain\": '/breaches?domain=',", "(res.status_code, res.reason)) return None return res.json() def html_description_to_human_readable(breach_description): \"\"\" Converting", "''' HELPER FUNCTIONS ''' def http_request(method, url_suffix, params=None, data=None): while", "dict() # type: dict comp_sites = sorted([item['Title'] for item in", "limit response code break if datetime.now() > RETRIES_END_TIME: return_error('Max retry", "break if datetime.now() > RETRIES_END_TIME: return_error('Max retry time has exceeded.')", "= 0 comp_email = dict() # type: dict comp_sites =", "zip(domain_list, api_res_list): md_list.append(data_to_markdown('Domain', domain, api_res)) ec_list.append(domain_to_entry_context(domain, api_res or [])) return", "http requests outputs \"\"\" api_res_list = [] for username in", "called is: {}'.format(command)) try: handle_proxy() set_retry_end_time() commands = { 'test-module':", "' breach]\\n' md += 'Date: **' + breach['BreachDate'] + '**\\n\\n'", "= link[2] link_from_desc = '[' + html_readable_name + ']' +", "api_email_res_list = commands[command](demisto.args()) for md, ec, api_paste_res in zip(md_list, ec_list,", "+ verified_breach + ' breach]\\n' md += 'Date: **' +", "for email in email_list: email_suffix = SUFFIXES.get(\"email\") + email +", "an array of http requests outputs \"\"\" api_res_list = []", "= html_link_pattern.findall(breach_description) for link in patterns_found: html_actual_address = link[0] html_readable_name", "pwned_username(username_list) md_list = [] ec_list = [] for username, api_res", "pwned_email_command, 'pwned-email': pwned_email_command, 'domain': pwned_domain_command, 'pwned-domain': 
pwned_domain_command, 'pwned-username': pwned_username_command }", "\"Pastes\":', pastes_list, ['ID', 'Title', 'Date', 'Source', 'Amount of emails in", "# Rate limit response code break if datetime.now() > RETRIES_END_TIME:", "found' return md def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score): return { 'Indicator':", "= 'https://haveibeenpwned.com/api/v3' HEADERS = { 'hibp-api-key': API_KEY, 'user-agent': 'DBOT-API', 'Content-Type':", "[] for paste_breach in api_paste_res: paste_entry = \\ { 'Source':", "zip(email_list, api_email_res_list, api_paste_res_list): md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res)) ec_list.append(email_to_entry_context(email, api_email_res or", "= [] for username in username_list: suffix = SUFFIXES.get(\"username\") +", "paste_breach in api_paste_res: paste_entry = \\ { 'Source': paste_breach['Source'], 'Title':", "url_suffix, params=None, data=None): while True: res = requests.request( method, BASE_URL", "data_to_markdown(query_type, query_arg, api_res, api_paste_res=None): records_found = False md = '###", "'No records found' return md def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score): return", "email_list: the email list that needed for the http requests", "api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix)) return api_email_res_list, api_paste_res_list def pwned_domain_command(args_dict): \"\"\" Executing the", "pwned_domain_command, 'pwned-username': pwned_username_command } if command in commands: md_list, ec_list,", "0 comp_email = dict() # type: dict comp_sites = sorted([item['Title']", "GLOBALS/PARAMS ''' VENDOR = 'Have I Been Pwned? V2' MAX_RETRY_ALLOWED", "HELPER FUNCTIONS ''' def http_request(method, url_suffix, params=None, data=None): while True:", "domains list that needed for the http requests :return: an", "html description to hr :param breach_description: Description of breach from", "comp_domain def set_retry_end_time(): global RETRIES_END_TIME if MAX_RETRY_ALLOWED != -1: RETRIES_END_TIME", "[] ec_list = [] for domain, api_res in zip(domain_list, api_res_list):", "return md def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score): return { 'Indicator': indicator_value,", "SUFFIXES.get(\"paste\") + email api_email_res_list.append(http_request('GET', url_suffix=email_suffix)) api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix)) return api_email_res_list, api_paste_res_list", "demisto.params().get('max_retry_time', -1) API_KEY = demisto.params().get('api_key') USE_SSL = not demisto.params().get('insecure', False)", "warnings requests.packages.urllib3.disable_warnings() ''' GLOBALS/PARAMS ''' VENDOR = 'Have I Been", "def pwned_email(email_list): \"\"\" Executing the http requests :param email_list: the", "'test-module': test_module, 'email': pwned_email_command, 'pwned-email': pwned_email_command, 'domain': pwned_domain_command, 'pwned-domain': pwned_domain_command,", "3 DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3", "email_to_entry_context(email, api_email_res, api_paste_res): dbot_score = 0 comp_email = dict() #", "# dict if context_type == 'email': context_dict['Address'] = context_main_value else:", "Description string that altered HTML urls to clickable urls for", "'Title', 'Date', 'Source', 'Amount of emails in paste']) if not", "= context_main_value else: context_dict['Name'] = context_main_value context_dict['Pwned-V2'] = { 
'Compromised':", "\"username\": '/breachedaccount/', \"paste\": '/pasteaccount/', \"email_truncate_verified\": '?truncateResponse=false&includeUnverified=true', \"domain_truncate_verified\": '&truncateResponse=false&includeUnverified=true', \"username_truncate_verified\": '?truncateResponse=false&includeUnverified=true'", "api_email_res_list, api_paste_res_list def pwned_domain_command(args_dict): \"\"\" Executing the pwned request for", "+ query_type.lower() + ': *' + query_arg + '*\\n' if", "!= 429: # Rate limit response code break if datetime.now()", "['ID', 'Title', 'Date', 'Source', 'Amount of emails in paste']) if", "input, the function returns 3 lists of outputs :param args_dict:", "wait_regex.group() else: demisto.error('failed extracting wait time will use default (5).", "' + query_type.lower() + ': *' + query_arg + '*\\n'", "- in this case the email list is needed :return:", "'Vendor': VENDOR, 'Reporters': ', '.join(comp_sites + comp_pastes) } } if", "url_suffix=suffix)) return api_res_list command = demisto.command() LOG('Command being called is:", "suffix = SUFFIXES.get(\"domain\") + domain + SUFFIXES.get(\"domain_truncate_verified\") api_res_list.append(http_request('GET', url_suffix=suffix)) return", "api_paste_res_list): md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res)) ec_list.append(email_to_entry_context(email, api_email_res or [], api_paste_res", "pwned_email(email_list) md_list = [] ec_list = [] for email, api_email_res,", "Res body: {}'.format(res.text)) wait_amount = 5 if datetime.now() + timedelta(seconds=int(wait_amount))", "True pastes_list = [] for paste_breach in api_paste_res: paste_entry =", "username_list: the username list that needed for the http requests", "email_list: email_suffix = SUFFIXES.get(\"email\") + email + SUFFIXES.get(\"email_truncate_verified\") paste_suffix =", "link_from_desc = '[' + html_readable_name + ']' + '(' +", "records breached [' + verified_breach + ' breach]\\n' md +=", "Converting from html description to hr :param breach_description: Description of", "+ \\ ' records breached [' + verified_breach + '", "paste']) if not records_found: md += 'No records found' return", "'DBOT-API', 'Content-Type': 'application/json', 'Accept': 'application/json' } DEFAULT_DBOT_SCORE_EMAIL = 2 if", "HTML urls to clickable urls for better readability in war-room", "== 'SUSPICIOUS' else 3 DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain') ==", "for item in api_email_res]) comp_pastes = sorted(set(item['Source'] for item in", "in zip(email_list, api_email_res_list, api_paste_res_list): md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res)) ec_list.append(email_to_entry_context(email, api_email_res", "= [] ec_list = [] for domain, api_res in zip(domain_list,", "Executing the http request :param username_list: the username list that", "+ timedelta(seconds=int(MAX_RETRY_ALLOWED)) ''' COMMANDS + REQUESTS FUNCTIONS ''' def test_module(args_dict):", "to Pwned Integration [%d] - %s' % (res.status_code, res.reason)) return", "sorted(comp_sites) comp_domain = dict() # type: dict dbot_score = 0", "{ 'Source': paste_breach['Source'], 'Title': paste_breach['Title'], 'ID': paste_breach['Id'], 'Date': '', 'Amount", "paste_breach['Source'], 'Title': paste_breach['Title'], 'ID': paste_breach['Id'], 'Date': '', 'Amount of emails", "= True for breach in api_res: verified_breach = 'Verified' if", "domain_to_entry_context(domain, api_res): comp_sites = [item['Title'] for item in api_res] 
comp_sites", "= '[' + html_readable_name + ']' + '(' + html_actual_address", "api_res_list): md_list.append(data_to_markdown('Username', username, api_res)) ec_list.append(domain_to_entry_context(username, api_res or [])) return md_list,", "return None return res.json() def html_description_to_human_readable(breach_description): \"\"\" Converting from html", "context_dict = dict() # dict if context_type == 'email': context_dict['Address']", "return md_list, ec_list, api_res_list def pwned_username(username_list): \"\"\" Executing the http", "\"\"\" api_res_list = [] for username in username_list: suffix =", "'/breaches?domain=', \"username\": '/breachedaccount/', \"paste\": '/pasteaccount/', \"email_truncate_verified\": '?truncateResponse=false&includeUnverified=true', \"domain_truncate_verified\": '&truncateResponse=false&includeUnverified=true', \"username_truncate_verified\":", "= re.search(r'\\d+', res.json()['message']) if wait_regex: wait_amount = wait_regex.group() else: demisto.error('failed", "+ ','.join(breach['DataClasses']) + '**\\n' if api_paste_res: records_found = True pastes_list", "'Reporters': ', '.join(comp_sites + comp_pastes) } } if malicious_score ==", "api_email_res, api_paste_res in zip(email_list, api_email_res_list, api_paste_res_list): md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res))", "paste_breach['Title'], 'ID': paste_breach['Id'], 'Date': '', 'Amount of emails in paste':", "re.sub(html_link_pattern, link_from_desc, breach_description, count=1) return breach_description def data_to_markdown(query_type, query_arg, api_res,", "def set_retry_end_time(): global RETRIES_END_TIME if MAX_RETRY_ALLOWED != -1: RETRIES_END_TIME =", "breach_description = re.sub(html_link_pattern, link_from_desc, breach_description, count=1) return breach_description def data_to_markdown(query_type,", "http requests outputs \"\"\" api_res_list = [] for domain in", "demisto.params().get('default_dbot_score_email') == 'SUSPICIOUS' else 3 DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain')", "VENDOR, 'Reporters': ', '.join(comp_sites + comp_pastes) } } if malicious_score", "3 arrays of outputs \"\"\" domain_list = argToList(args_dict.get('domain', '')) api_res_list", "for better readability in war-room \"\"\" html_link_pattern = re.compile('<a href=\"(.+?)\"(.+?)>(.+?)</a>')", "'application/json', 'Accept': 'application/json' } DEFAULT_DBOT_SCORE_EMAIL = 2 if demisto.params().get('default_dbot_score_email') ==", "3 SUFFIXES = { \"email\": '/breachedaccount/', \"domain\": '/breaches?domain=', \"username\": '/breachedaccount/',", "pwned request for domains list, in order to support list", "LOG('Command being called is: {}'.format(command)) try: handle_proxy() set_retry_end_time() commands =", "'Indicator': indicator_value, 'Type': indicator_type, 'Vendor': VENDOR, 'Score': dbot_score } def", "username, api_res in zip(username_list, api_res_list): md_list.append(data_to_markdown('Username', username, api_res)) ec_list.append(domain_to_entry_context(username, api_res", "re import requests # Disable insecure warnings requests.packages.urllib3.disable_warnings() ''' GLOBALS/PARAMS", "for ' + query_type.lower() + ': *' + query_arg +", "of outputs \"\"\" email_list = argToList(args_dict.get('email', '')) api_email_res_list, api_paste_res_list =", "patterns_found: html_actual_address = link[0] html_readable_name = link[2] link_from_desc = '['", "dbot_score): return { 'Indicator': indicator_value, 'Type': indicator_type, 'Vendor': VENDOR, 
'Score':", "+= html_description_to_human_readable(breach['Description']) + '\\n' md += 'Data breached: **' +", "api_paste_res_list = [] for email in email_list: email_suffix = SUFFIXES.get(\"email\")", "pwned request for usernames list, in order to support list", "+ email + SUFFIXES.get(\"email_truncate_verified\") paste_suffix = SUFFIXES.get(\"paste\") + email api_email_res_list.append(http_request('GET',", "'hibp-api-key': API_KEY, 'user-agent': 'DBOT-API', 'Content-Type': 'application/json', 'Accept': 'application/json' } DEFAULT_DBOT_SCORE_EMAIL", "of outputs \"\"\" domain_list = argToList(args_dict.get('domain', '')) api_res_list = pwned_domain(domain_list)", "['ok'], [None], [None] def pwned_email_command(args_dict): \"\"\" Executing the pwned request", "V2' MAX_RETRY_ALLOWED = demisto.params().get('max_retry_time', -1) API_KEY = demisto.params().get('api_key') USE_SSL =", "commands = { 'test-module': test_module, 'email': pwned_email_command, 'pwned-email': pwned_email_command, 'domain':", "code break if datetime.now() > RETRIES_END_TIME: return_error('Max retry time has", "SUFFIXES.get(\"domain\") + domain + SUFFIXES.get(\"domain_truncate_verified\") api_res_list.append(http_request('GET', url_suffix=suffix)) return api_res_list def", "the pwned request for domains list, in order to support", "else 'Unverified' md += '#### ' + breach['Title'] + '", "'#### ' + breach['Title'] + ' (' + breach['Domain'] +", "{}'.format(res.text)) wait_amount = 5 if datetime.now() + timedelta(seconds=int(wait_amount)) > RETRIES_END_TIME:", "\"email_truncate_verified\": '?truncateResponse=false&includeUnverified=true', \"domain_truncate_verified\": '&truncateResponse=false&includeUnverified=true', \"username_truncate_verified\": '?truncateResponse=false&includeUnverified=true' } RETRIES_END_TIME = datetime.min", "request was successful the test will return OK :return: 3", "return OK :return: 3 arrays of outputs \"\"\" http_request('GET', SUFFIXES.get(\"username\",", ":param email_list: the email list that needed for the http", "\"domain_truncate_verified\": '&truncateResponse=false&includeUnverified=true', \"username_truncate_verified\": '?truncateResponse=false&includeUnverified=true' } RETRIES_END_TIME = datetime.min ''' HELPER", "If the http request was successful the test will return", "+ email api_email_res_list.append(http_request('GET', url_suffix=email_suffix)) api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix)) return api_email_res_list, api_paste_res_list def", "BASE_URL = 'https://haveibeenpwned.com/api/v3' HEADERS = { 'hibp-api-key': API_KEY, 'user-agent': 'DBOT-API',", "call to Pwned Integration [%d]. 
Full text: %s' % (res.status_code,", "+ breach['Domain'] + '): ' + str(breach['PwnCount']) + \\ '", "= create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN) comp_domain[outputPaths['domain']] = domain_context comp_domain['DBotScore']", "array of http requests outputs \"\"\" api_res_list = [] for", "args_dict: the demisto argument - in this case the domain", "comp_pastes) } } if malicious_score == 3: context_dict['Malicious'] = add_malicious_to_context(context_type)", "= requests.request( method, BASE_URL + url_suffix, verify=USE_SSL, params=params, data=data, headers=HEADERS", "api_res_list def pwned_domain(domain_list): \"\"\" Executing the http request :param domain_list:", "api_email_res_list.append(http_request('GET', url_suffix=email_suffix)) api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix)) return api_email_res_list, api_paste_res_list def pwned_domain_command(args_dict): \"\"\"", "= paste_breach['Date'].split('T')[0] pastes_list.append(paste_entry) md += tableToMarkdown('The email address was found", "md_list = [] ec_list = [] for domain, api_res in", "api_res_list def pwned_username_command(args_dict): \"\"\" Executing the pwned request for usernames", "pwned_domain_command, 'pwned-domain': pwned_domain_command, 'pwned-username': pwned_username_command } if command in commands:", "return comp_email def domain_to_entry_context(domain, api_res): comp_sites = [item['Title'] for item", "if wait_regex: wait_amount = wait_regex.group() else: demisto.error('failed extracting wait time", "\"\"\" html_link_pattern = re.compile('<a href=\"(.+?)\"(.+?)>(.+?)</a>') patterns_found = html_link_pattern.findall(breach_description) for link", "True for breach in api_res: verified_breach = 'Verified' if breach['IsVerified']", "= DEFAULT_DBOT_SCORE_DOMAIN domain_context = create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN) comp_domain[outputPaths['domain']]", "in email_list: email_suffix = SUFFIXES.get(\"email\") + email + SUFFIXES.get(\"email_truncate_verified\") paste_suffix", "username list that needed for the http requests :return: an", "domain_context = create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN) comp_domain[outputPaths['domain']] = domain_context", "to support list input, the function returns 3 lists of", "case the username list is needed :return: 3 arrays of", "requests outputs \"\"\" api_res_list = [] for domain in domain_list:", "demisto.params().get('api_key') USE_SSL = not demisto.params().get('insecure', False) BASE_URL = 'https://haveibeenpwned.com/api/v3' HEADERS", "BASE_URL + url_suffix, verify=USE_SSL, params=params, data=data, headers=HEADERS ) if res.status_code", "'Score': dbot_score } def create_context_entry(context_type, context_main_value, comp_sites, comp_pastes, malicious_score): context_dict", "argument - in this case the domain list is needed", "link in patterns_found: html_actual_address = link[0] html_readable_name = link[2] link_from_desc", "create_dbot_score_dictionary(domain, 'domain', dbot_score) return comp_domain def set_retry_end_time(): global RETRIES_END_TIME if", "api_res_list command = demisto.command() LOG('Command being called is: {}'.format(command)) try:", "comp_sites, comp_pastes, DEFAULT_DBOT_SCORE_EMAIL) comp_email[outputPaths['email']] = email_context comp_email['DBotScore'] = create_dbot_score_dictionary(email, 'email',", "SUFFIXES.get(\"username\", '') + 'test') return ['ok'], [None], [None] def pwned_email_command(args_dict):", "= 
''' HELPER FUNCTIONS '''


def http_request(method, url_suffix, params=None, data=None):
    while True:
        res = requests.request(
            method,
            BASE_URL + url_suffix,
            verify=USE_SSL,
            params=params,
            data=data,
            headers=HEADERS
        )

        if res.status_code != 429:
            # Rate limit response code
            break

        if datetime.now() > RETRIES_END_TIME:
            return_error('Max retry time has been exceeded.')

        wait_regex = re.search(r'\d+', res.json()['message'])
        if wait_regex:
            wait_amount = wait_regex.group()
        else:
            demisto.error('Failed to extract the wait time; using the default (5). Res body: {}'.format(res.text))
            wait_amount = 5

        if datetime.now() + timedelta(seconds=int(wait_amount)) > RETRIES_END_TIME:
            return_error('Max retry time has been exceeded.')
        time.sleep(int(wait_amount))

    if res.status_code == 404:
        return None
    if not res.status_code == 200:
        if not res.status_code == 401:
            demisto.error('Error in API call to Pwned Integration [%d]. Full text: %s' % (res.status_code, res.text))
        return_error('Error in API call to Pwned Integration [%d] - %s' % (res.status_code, res.reason))
        return None

    return res.json()
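# The regex above pulls the wait time out of the 429 body's 'message' field.
# An illustrative (not captured) body of that shape:
#   {"statusCode": 429, "message": "Rate limit is exceeded. Try again in 2 seconds."}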
def html_description_to_human_readable(breach_description):
    """
    Convert an HTML breach description to a human-readable one.
    :param breach_description: Description of breach from API response
    :return: Description string with HTML links rewritten as clickable markdown
    links, for better readability in the war room
    """
    html_link_pattern = re.compile('<a href="(.+?)"(.+?)>(.+?)</a>')
    patterns_found = html_link_pattern.findall(breach_description)
    for link in patterns_found:
        html_actual_address = link[0]
        html_readable_name = link[2]
        link_from_desc = '[' + html_readable_name + ']' + '(' + html_actual_address + ')'
        breach_description = re.sub(html_link_pattern, link_from_desc, breach_description, count=1)
    return breach_description
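# Example of the rewrite performed above (input is illustrative):
#   In:  'Details in <a href="https://example.com/report" target="_blank">this report</a>.'
#   Out: 'Details in [this report](https://example.com/report).'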
def data_to_markdown(query_type, query_arg, api_res, api_paste_res=None):
    records_found = False

    md = '### Have I Been Pwned query for ' + query_type.lower() + ': *' + query_arg + '*\n'

    if api_res:
        records_found = True
        for breach in api_res:
            verified_breach = 'Verified' if breach['IsVerified'] else 'Unverified'
            md += '#### ' + breach['Title'] + ' (' + breach['Domain'] + '): ' + str(breach['PwnCount']) + \
                  ' records breached [' + verified_breach + ' breach]\n'
            md += 'Date: **' + breach['BreachDate'] + '**\n\n'
            md += html_description_to_human_readable(breach['Description']) + '\n'
            md += 'Data breached: **' + ','.join(breach['DataClasses']) + '**\n'

    if api_paste_res:
        records_found = True
        pastes_list = []
        for paste_breach in api_paste_res:
            paste_entry = \
                {
                    'Source': paste_breach['Source'],
                    'Title': paste_breach['Title'],
                    'ID': paste_breach['Id'],
                    'Date': '',
                    'Amount of emails in paste': str(paste_breach['EmailCount'])
                }

            if paste_breach['Date']:
                paste_entry['Date'] = paste_breach['Date'].split('T')[0]

            pastes_list.append(paste_entry)

        md += tableToMarkdown('The email address was found in the following "Pastes":',
                              pastes_list,
                              ['ID', 'Title', 'Date', 'Source', 'Amount of emails in paste'])

    if not records_found:
        md += 'No records found'

    return md
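# With one verified breach, the markdown built above renders roughly as
# (all values illustrative):
#   ### Have I Been Pwned query for email: *user@example.com*
#   #### SomeSite (somesite.com): 123456 records breached [Verified breach]
#   Date: **2019-01-01**
#   ...breach description...
#   Data breached: **Email addresses,Passwords**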
def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score):
    return {
        'Indicator': indicator_value,
        'Type': indicator_type,
        'Vendor': VENDOR,
        'Score': dbot_score
    }


def create_context_entry(context_type, context_main_value, comp_sites, comp_pastes, malicious_score):
    context_dict = dict()  # type: dict

    if context_type == 'email':
        context_dict['Address'] = context_main_value
    else:
        context_dict['Name'] = context_main_value

    context_dict['Pwned-V2'] = {
        'Compromised': {
            'Vendor': VENDOR,
            'Reporters': ', '.join(comp_sites + comp_pastes)
        }
    }

    if malicious_score == 3:
        context_dict['Malicious'] = add_malicious_to_context(context_type)

    return context_dict


def add_malicious_to_context(malicious_type):
    return {
        'Vendor': VENDOR,
        'Description': 'The ' + malicious_type + ' has been compromised'
    }
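# An email entry built above, with the default (malicious) score, looks
# roughly like this (values illustrative):
#   {'Address': 'user@example.com',
#    'Pwned-V2': {'Compromised': {'Vendor': 'Have I Been Pwned? V2',
#                                 'Reporters': 'SomeSite, Pastebin.com'}},
#    'Malicious': {'Vendor': 'Have I Been Pwned? V2',
#                  'Description': 'The email has been compromised'}}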
def email_to_entry_context(email, api_email_res, api_paste_res):
    dbot_score = 0
    comp_email = dict()  # type: dict
    comp_sites = sorted([item['Title'] for item in api_email_res])
    comp_pastes = sorted(set(item['Source'] for item in api_paste_res))

    if len(comp_sites) > 0:
        dbot_score = DEFAULT_DBOT_SCORE_EMAIL
        email_context = create_context_entry('email', email, comp_sites, comp_pastes, DEFAULT_DBOT_SCORE_EMAIL)
        comp_email[outputPaths['email']] = email_context

    comp_email['DBotScore'] = create_dbot_score_dictionary(email, 'email', dbot_score)

    return comp_email


def domain_to_entry_context(domain, api_res):
    comp_sites = [item['Title'] for item in api_res]
    comp_sites = sorted(comp_sites)
    comp_domain = dict()  # type: dict
    dbot_score = 0

    if len(comp_sites) > 0:
        dbot_score = DEFAULT_DBOT_SCORE_DOMAIN
        domain_context = create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN)
        comp_domain[outputPaths['domain']] = domain_context

    comp_domain['DBotScore'] = create_dbot_score_dictionary(domain, 'domain', dbot_score)

    return comp_domain
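# DBotScore semantics used above: 0 means no breaches were found (unknown),
# while a hit scores 2 (suspicious) or 3 (bad) depending on the
# default_dbot_score_email / default_dbot_score_domain parameters.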
def set_retry_end_time():
    global RETRIES_END_TIME
    if MAX_RETRY_ALLOWED != -1:
        RETRIES_END_TIME = datetime.now() + timedelta(seconds=int(MAX_RETRY_ALLOWED))
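# Note: with max_retry_time left at -1, RETRIES_END_TIME stays at datetime.min,
# so the first 429 response in http_request() fails immediately instead of
# sleeping and retrying.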
''' COMMANDS + REQUESTS FUNCTIONS '''


def test_module(args_dict):
    """
    If the http request was successful the test will return OK
    :return: 3 arrays of outputs
    """
    http_request('GET', SUFFIXES.get("username", '') + 'test')
    return ['ok'], [None], [None]


def pwned_email_command(args_dict):
    """
    Executing the pwned request for an email list; to support list input,
    the function returns 3 lists of outputs
    :param args_dict: the demisto arguments - in this case the email list is needed
    :return: 3 arrays of outputs
    """
    email_list = argToList(args_dict.get('email', ''))
    api_email_res_list, api_paste_res_list = pwned_email(email_list)
    md_list = []
    ec_list = []

    for email, api_email_res, api_paste_res in zip(email_list, api_email_res_list, api_paste_res_list):
        md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res))
        ec_list.append(email_to_entry_context(email, api_email_res or [], api_paste_res or []))
    return md_list, ec_list, api_email_res_list


def pwned_email(email_list):
    """
    Executing the http requests
    :param email_list: the email list needed for the http requests
    :return: 2 arrays of http requests outputs
    """
    api_email_res_list = []
    api_paste_res_list = []

    for email in email_list:
        email_suffix = SUFFIXES.get("email") + email + SUFFIXES.get("email_truncate_verified")
        paste_suffix = SUFFIXES.get("paste") + email
        api_email_res_list.append(http_request('GET', url_suffix=email_suffix))
        api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix))

    return api_email_res_list, api_paste_res_list
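# Usage from the war room (addresses illustrative): argToList splits the
# comma-separated argument, so a single command can query several accounts:
#   !pwned-email email="user@example.com,admin@example.com"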
def pwned_domain_command(args_dict):
    """
    Executing the pwned request for a domain list; to support list input,
    the function returns 3 lists of outputs
    :param args_dict: the demisto arguments - in this case the domain list is needed
    :return: 3 arrays of outputs
    """
    domain_list = argToList(args_dict.get('domain', ''))
    api_res_list = pwned_domain(domain_list)
    md_list = []
    ec_list = []

    for domain, api_res in zip(domain_list, api_res_list):
        md_list.append(data_to_markdown('Domain', domain, api_res))
        ec_list.append(domain_to_entry_context(domain, api_res or []))
    return md_list, ec_list, api_res_list


def pwned_domain(domain_list):
    """
    Executing the http request
    :param domain_list: the domain list needed for the http requests
    :return: an array of http requests outputs
    """
    api_res_list = []
    for domain in domain_list:
        suffix = SUFFIXES.get("domain") + domain + SUFFIXES.get("domain_truncate_verified")
        api_res_list.append(http_request('GET', url_suffix=suffix))
    return api_res_list


def pwned_username_command(args_dict):
    """
    Executing the pwned request for a username list; to support list input,
    the function returns 3 lists of outputs
    :param args_dict: the demisto arguments - in this case the username list is needed
    :return: 3 arrays of outputs
    """
    username_list = argToList(args_dict.get('username', ''))
    api_res_list = pwned_username(username_list)
    md_list = []
    ec_list = []

    for username, api_res in zip(username_list, api_res_list):
        md_list.append(data_to_markdown('Username', username, api_res))
        ec_list.append(domain_to_entry_context(username, api_res or []))
    return md_list, ec_list, api_res_list


def pwned_username(username_list):
    """
    Executing the http request
    :param username_list: the username list needed for the http requests
    :return: an array of http requests outputs
    """
    api_res_list = []
    for username in username_list:
        suffix = SUFFIXES.get("username") + username + SUFFIXES.get("username_truncate_verified")
        api_res_list.append(http_request('GET', url_suffix=suffix))
    return api_res_list
str(paste_breach['EmailCount']) } if", "len(comp_sites) > 0: dbot_score = DEFAULT_DBOT_SCORE_DOMAIN domain_context = create_context_entry('domain', domain,", "breach['Title'] + ' (' + breach['Domain'] + '): ' +", "!= -1: RETRIES_END_TIME = datetime.now() + timedelta(seconds=int(MAX_RETRY_ALLOWED)) ''' COMMANDS +", "in api_res] comp_sites = sorted(comp_sites) comp_domain = dict() # type:", "to clickable urls for better readability in war-room \"\"\" html_link_pattern", "[None] def pwned_email_command(args_dict): \"\"\" Executing the pwned request for emails", "emails in paste']) if not records_found: md += 'No records", "time will use default (5). Res body: {}'.format(res.text)) wait_amount =", "email, api_email_res, api_paste_res)) ec_list.append(email_to_entry_context(email, api_email_res or [], api_paste_res or []))", "breach in api_res: verified_breach = 'Verified' if breach['IsVerified'] else 'Unverified'", "domain_list: the domains list that needed for the http requests", "api_res_list = [] for domain in domain_list: suffix = SUFFIXES.get(\"domain\")", "def test_module(args_dict): \"\"\" If the http request was successful the", "request :param domain_list: the domains list that needed for the", "None return res.json() def html_description_to_human_readable(breach_description): \"\"\" Converting from html description", "return api_res_list command = demisto.command() LOG('Command being called is: {}'.format(command))", "\"\"\" domain_list = argToList(args_dict.get('domain', '')) api_res_list = pwned_domain(domain_list) md_list =", "= add_malicious_to_context(context_type) return context_dict def add_malicious_to_context(malicious_type): return { 'Vendor': VENDOR,", "= pwned_username(username_list) md_list = [] ec_list = [] for username,", "'Amount of emails in paste']) if not records_found: md +=", "def domain_to_entry_context(domain, api_res): comp_sites = [item['Title'] for item in api_res]", "or [])) return md_list, ec_list, api_res_list def pwned_domain(domain_list): \"\"\" Executing", "import * ''' IMPORTS ''' import re import requests #", "\"domain\": '/breaches?domain=', \"username\": '/breachedaccount/', \"paste\": '/pasteaccount/', \"email_truncate_verified\": '?truncateResponse=false&includeUnverified=true', \"domain_truncate_verified\": '&truncateResponse=false&includeUnverified=true',", "def http_request(method, url_suffix, params=None, data=None): while True: res = requests.request(", "md_list = [] ec_list = [] for email, api_email_res, api_paste_res", "if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3 SUFFIXES = { \"email\":", "} if command in commands: md_list, ec_list, api_email_res_list = commands[command](demisto.args())", "else 3 DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else", "the pwned request for usernames list, in order to support", "of outputs \"\"\" username_list = argToList(args_dict.get('username', '')) api_res_list = pwned_username(username_list)", "+ '*\\n' if api_res: records_found = True for breach in", "', '.join(comp_sites + comp_pastes) } } if malicious_score == 3:", "the username list that needed for the http requests :return:", "set_retry_end_time() commands = { 'test-module': test_module, 'email': pwned_email_command, 'pwned-email': pwned_email_command,", "from CommonServerPython import * ''' IMPORTS ''' import re import", "md += tableToMarkdown('The email address was found in the following", "create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score): 
return { 'Indicator': indicator_value, 'Type': indicator_type, 'Vendor':", "''' import re import requests # Disable insecure warnings requests.packages.urllib3.disable_warnings()", "the http request :param username_list: the username list that needed", "')' breach_description = re.sub(html_link_pattern, link_from_desc, breach_description, count=1) return breach_description def", "= [] for domain in domain_list: suffix = SUFFIXES.get(\"domain\") +", "VENDOR, 'Score': dbot_score } def create_context_entry(context_type, context_main_value, comp_sites, comp_pastes, malicious_score):", "api_paste_res)) if len(comp_sites) > 0: dbot_score = DEFAULT_DBOT_SCORE_EMAIL email_context =", "of breach from API response :return: Description string that altered", "if not records_found: md += 'No records found' return md", "+ malicious_type + ' has been compromised' } def email_to_entry_context(email,", "urls to clickable urls for better readability in war-room \"\"\"", "text: %s' % (res.status_code, res.text)) return_error('Error in API call to", "= True pastes_list = [] for paste_breach in api_paste_res: paste_entry", "+ '**\\n\\n' md += html_description_to_human_readable(breach['Description']) + '\\n' md += 'Data", "function returns 3 lists of outputs :param args_dict: the demisto", "res.status_code == 404: return None if not res.status_code == 200:", "breach_description: Description of breach from API response :return: Description string", "pwned_email_command, 'domain': pwned_domain_command, 'pwned-domain': pwned_domain_command, 'pwned-username': pwned_username_command } if command", "'Error in API call to Pwned Integration [%d]. Full text:", "\"\"\" Executing the pwned request for emails list, in order", "request for usernames list, in order to support list input,", "requests # Disable insecure warnings requests.packages.urllib3.disable_warnings() ''' GLOBALS/PARAMS ''' VENDOR", "'pwned-domain': pwned_domain_command, 'pwned-username': pwned_username_command } if command in commands: md_list,", "'/breachedaccount/', \"domain\": '/breaches?domain=', \"username\": '/breachedaccount/', \"paste\": '/pasteaccount/', \"email_truncate_verified\": '?truncateResponse=false&includeUnverified=true', \"domain_truncate_verified\":", "return comp_domain def set_retry_end_time(): global RETRIES_END_TIME if MAX_RETRY_ALLOWED != -1:", "\"\"\" email_list = argToList(args_dict.get('email', '')) api_email_res_list, api_paste_res_list = pwned_email(email_list) md_list", "== 401: demisto.error( 'Error in API call to Pwned Integration", "the domain list is needed :return: 3 arrays of outputs", "successful the test will return OK :return: 3 arrays of", "'/breachedaccount/', \"paste\": '/pasteaccount/', \"email_truncate_verified\": '?truncateResponse=false&includeUnverified=true', \"domain_truncate_verified\": '&truncateResponse=false&includeUnverified=true', \"username_truncate_verified\": '?truncateResponse=false&includeUnverified=true' }", "query for ' + query_type.lower() + ': *' + query_arg", "pwned_username_command } if command in commands: md_list, ec_list, api_email_res_list =", "params=params, data=data, headers=HEADERS ) if res.status_code != 429: # Rate", "email address was found in the following \"Pastes\":', pastes_list, ['ID',", "indicator_type, dbot_score): return { 'Indicator': indicator_value, 'Type': indicator_type, 'Vendor': VENDOR,", "demisto.command() LOG('Command being called is: {}'.format(command)) try: handle_proxy() set_retry_end_time() commands", "demisto argument - in this case the domain list is", 
"outputs \"\"\" api_res_list = [] for username in username_list: suffix", "pwned_username_command(args_dict): \"\"\" Executing the pwned request for usernames list, in", "request :param username_list: the username list that needed for the", "def pwned_domain(domain_list): \"\"\" Executing the http request :param domain_list: the", "ec_list.append(email_to_entry_context(email, api_email_res or [], api_paste_res or [])) return md_list, ec_list,", "in zip(username_list, api_res_list): md_list.append(data_to_markdown('Username', username, api_res)) ec_list.append(domain_to_entry_context(username, api_res or []))", "' + breach['Title'] + ' (' + breach['Domain'] + '):", "dict comp_sites = sorted([item['Title'] for item in api_email_res]) comp_pastes =", "is needed :return: 3 arrays of outputs \"\"\" username_list =", "for paste_breach in api_paste_res: paste_entry = \\ { 'Source': paste_breach['Source'],", "Rate limit response code break if datetime.now() > RETRIES_END_TIME: return_error('Max", "string that altered HTML urls to clickable urls for better", "Executing the http request :param domain_list: the domains list that", "\"\"\" api_res_list = [] for domain in domain_list: suffix =", "if api_res: records_found = True for breach in api_res: verified_breach", "dbot_score = 0 if len(comp_sites) > 0: dbot_score = DEFAULT_DBOT_SCORE_DOMAIN", "http request was successful the test will return OK :return:", "= sorted([item['Title'] for item in api_email_res]) comp_pastes = sorted(set(item['Source'] for", "return md_list, ec_list, api_res_list def pwned_domain(domain_list): \"\"\" Executing the http", "in commands: md_list, ec_list, api_email_res_list = commands[command](demisto.args()) for md, ec,", "description to hr :param breach_description: Description of breach from API", "paste_entry['Date'] = paste_breach['Date'].split('T')[0] pastes_list.append(paste_entry) md += tableToMarkdown('The email address was", "[] for domain in domain_list: suffix = SUFFIXES.get(\"domain\") + domain", "\\ ' records breached [' + verified_breach + ' breach]\\n'", "= pwned_domain(domain_list) md_list = [] ec_list = [] for domain,", "in API call to Pwned Integration [%d]. 
Full text: %s'", "username_list = argToList(args_dict.get('username', '')) api_res_list = pwned_username(username_list) md_list = []", "DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3 SUFFIXES", "''' COMMANDS + REQUESTS FUNCTIONS ''' def test_module(args_dict): \"\"\" If", "API response :return: Description string that altered HTML urls to", "+ ' (' + breach['Domain'] + '): ' + str(breach['PwnCount'])", "\"\"\" If the http request was successful the test will", "outputs \"\"\" username_list = argToList(args_dict.get('username', '')) api_res_list = pwned_username(username_list) md_list", "username in username_list: suffix = SUFFIXES.get(\"username\") + username + SUFFIXES.get(\"username_truncate_verified\")", "list that needed for the http requests :return: an array", "= '### Have I Been Pwned query for ' +", "def pwned_username_command(args_dict): \"\"\" Executing the pwned request for usernames list,", "if len(comp_sites) > 0: dbot_score = DEFAULT_DBOT_SCORE_DOMAIN domain_context = create_context_entry('domain',", "api_email_res or [], api_paste_res or [])) return md_list, ec_list, api_email_res_list", "'Source', 'Amount of emails in paste']) if not records_found: md", "\"\"\" Executing the pwned request for domains list, in order", "return { 'Indicator': indicator_value, 'Type': indicator_type, 'Vendor': VENDOR, 'Score': dbot_score", "demisto argument - in this case the username list is", "= domain_context comp_domain['DBotScore'] = create_dbot_score_dictionary(domain, 'domain', dbot_score) return comp_domain def", "for username in username_list: suffix = SUFFIXES.get(\"username\") + username +", "records_found = True for breach in api_res: verified_breach = 'Verified'", "if api_paste_res: records_found = True pastes_list = [] for paste_breach", "http requests outputs \"\"\" api_email_res_list = [] api_paste_res_list = []", "ec_list, api_res_list def pwned_username(username_list): \"\"\" Executing the http request :param", "link[2] link_from_desc = '[' + html_readable_name + ']' + '('", "wait_regex = re.search(r'\\d+', res.json()['message']) if wait_regex: wait_amount = wait_regex.group() else:", "altered HTML urls to clickable urls for better readability in", "> RETRIES_END_TIME: return_error('Max retry time has exceeded.') time.sleep(int(wait_amount)) if res.status_code", "+ '(' + html_actual_address + ')' breach_description = re.sub(html_link_pattern, link_from_desc,", "res.text)) return_error('Error in API call to Pwned Integration [%d] -", "add_malicious_to_context(malicious_type): return { 'Vendor': VENDOR, 'Description': 'The ' + malicious_type", "comp_sites = sorted([item['Title'] for item in api_email_res]) comp_pastes = sorted(set(item['Source']", "'/pasteaccount/', \"email_truncate_verified\": '?truncateResponse=false&includeUnverified=true', \"domain_truncate_verified\": '&truncateResponse=false&includeUnverified=true', \"username_truncate_verified\": '?truncateResponse=false&includeUnverified=true' } RETRIES_END_TIME =", "demisto.error( 'Error in API call to Pwned Integration [%d]. 
Full", "'email', dbot_score) return comp_email def domain_to_entry_context(domain, api_res): comp_sites = [item['Title']", "api_paste_res=None): records_found = False md = '### Have I Been", "requests outputs \"\"\" api_res_list = [] for username in username_list:", "if res.status_code == 404: return None if not res.status_code ==", ":return: 2 arrays of http requests outputs \"\"\" api_email_res_list =", "the http request was successful the test will return OK", "dbot_score = DEFAULT_DBOT_SCORE_DOMAIN domain_context = create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN)", "list is needed :return: 3 arrays of outputs \"\"\" domain_list", "urls for better readability in war-room \"\"\" html_link_pattern = re.compile('<a", "= [] for domain, api_res in zip(domain_list, api_res_list): md_list.append(data_to_markdown('Domain', domain,", "api_email_res_list): return_outputs(md, ec, api_paste_res) # Log exceptions except Exception as", "404: return None if not res.status_code == 200: if not", "html_readable_name = link[2] link_from_desc = '[' + html_readable_name + ']'", "exceeded.') time.sleep(int(wait_amount)) if res.status_code == 404: return None if not", "'domain', dbot_score) return comp_domain def set_retry_end_time(): global RETRIES_END_TIME if MAX_RETRY_ALLOWED", "records found' return md def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score): return {", "REQUESTS FUNCTIONS ''' def test_module(args_dict): \"\"\" If the http request", "in this case the username list is needed :return: 3", "pwned_email(email_list): \"\"\" Executing the http requests :param email_list: the email", "'.join(comp_sites + comp_pastes) } } if malicious_score == 3: context_dict['Malicious']", "api_paste_res: paste_entry = \\ { 'Source': paste_breach['Source'], 'Title': paste_breach['Title'], 'ID':", "if not res.status_code == 200: if not res.status_code == 401:", ":return: an array of http requests outputs \"\"\" api_res_list =", "context_type == 'email': context_dict['Address'] = context_main_value else: context_dict['Name'] = context_main_value", "arrays of outputs \"\"\" username_list = argToList(args_dict.get('username', '')) api_res_list =", "HEADERS = { 'hibp-api-key': API_KEY, 'user-agent': 'DBOT-API', 'Content-Type': 'application/json', 'Accept':", "demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3 SUFFIXES = { \"email\": '/breachedaccount/',", "+= tableToMarkdown('The email address was found in the following \"Pastes\":',", "0: dbot_score = DEFAULT_DBOT_SCORE_DOMAIN domain_context = create_context_entry('domain', domain, comp_sites, [],", "md_list, ec_list, api_res_list def pwned_domain(domain_list): \"\"\" Executing the http request", "md += 'Date: **' + breach['BreachDate'] + '**\\n\\n' md +=", "+ '): ' + str(breach['PwnCount']) + \\ ' records breached", "api_email_res_list = [] api_paste_res_list = [] for email in email_list:", "= SUFFIXES.get(\"paste\") + email api_email_res_list.append(http_request('GET', url_suffix=email_suffix)) api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix)) return api_email_res_list,", "if MAX_RETRY_ALLOWED != -1: RETRIES_END_TIME = datetime.now() + timedelta(seconds=int(MAX_RETRY_ALLOWED)) '''", "argument - in this case the username list is needed", "{ \"email\": '/breachedaccount/', \"domain\": '/breaches?domain=', \"username\": '/breachedaccount/', \"paste\": '/pasteaccount/', \"email_truncate_verified\":", "= [] for email in email_list: email_suffix = SUFFIXES.get(\"email\") 
+", "'')) api_email_res_list, api_paste_res_list = pwned_email(email_list) md_list = [] ec_list =", "not demisto.params().get('insecure', False) BASE_URL = 'https://haveibeenpwned.com/api/v3' HEADERS = { 'hibp-api-key':", "breach from API response :return: Description string that altered HTML", "of emails in paste']) if not records_found: md += 'No", "= 2 if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3 SUFFIXES =", "= pwned_email(email_list) md_list = [] ec_list = [] for email,", "' + malicious_type + ' has been compromised' } def", "retry time has exceeded.') time.sleep(int(wait_amount)) if res.status_code == 404: return", "+ '\\n' md += 'Data breached: **' + ','.join(breach['DataClasses']) +", "= [item['Title'] for item in api_res] comp_sites = sorted(comp_sites) comp_domain", "comp_email def domain_to_entry_context(domain, api_res): comp_sites = [item['Title'] for item in", "query_type.lower() + ': *' + query_arg + '*\\n' if api_res:", "context_main_value else: context_dict['Name'] = context_main_value context_dict['Pwned-V2'] = { 'Compromised': {", "sorted(set(item['Source'] for item in api_paste_res)) if len(comp_sites) > 0: dbot_score", "records_found = False md = '### Have I Been Pwned", "Integration [%d]. Full text: %s' % (res.status_code, res.text)) return_error('Error in", "+ SUFFIXES.get(\"username_truncate_verified\") api_res_list.append(http_request('GET', url_suffix=suffix)) return api_res_list command = demisto.command() LOG('Command", "[] api_paste_res_list = [] for email in email_list: email_suffix =", "md_list = [] ec_list = [] for username, api_res in", "+ REQUESTS FUNCTIONS ''' def test_module(args_dict): \"\"\" If the http", "the demisto argument - in this case the domain list", "in domain_list: suffix = SUFFIXES.get(\"domain\") + domain + SUFFIXES.get(\"domain_truncate_verified\") api_res_list.append(http_request('GET',", "the username list is needed :return: 3 arrays of outputs", "indicator_type, 'Vendor': VENDOR, 'Score': dbot_score } def create_context_entry(context_type, context_main_value, comp_sites,", "ec_list = [] for username, api_res in zip(username_list, api_res_list): md_list.append(data_to_markdown('Username',", "''' GLOBALS/PARAMS ''' VENDOR = 'Have I Been Pwned? V2'", "type: dict dbot_score = 0 if len(comp_sites) > 0: dbot_score", "email list that needed for the http requests :return: 2", "this case the username list is needed :return: 3 arrays", "in this case the email list is needed :return: 3", "was found in the following \"Pastes\":', pastes_list, ['ID', 'Title', 'Date',", "email_suffix = SUFFIXES.get(\"email\") + email + SUFFIXES.get(\"email_truncate_verified\") paste_suffix = SUFFIXES.get(\"paste\")" ]
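The row above fragments a Demisto/XSOAR "Have I Been Pwned? V2" integration. Its overlapping n-grams cover the rate-limit-aware HTTP wrapper almost end to end; the sketch below stitches those fragments back together in source order. Hedges: the `verify=` keyword and the `USE_SSL` name are assumptions (the row shows only `= not demisto.params().get('insecure', False)` with the left-hand side cut off), the fallback `wait_amount = 5` is inferred from the "default (5)" log line, and `demisto`/`return_error` are stubbed in place of the `from CommonServerPython import *` runtime the row imports.

```python
import re
import time
import requests
from datetime import datetime, timedelta

def return_error(message):  # CommonServerPython helper, stubbed for self-containment
    raise SystemExit(message)

class _Demisto:  # minimal stand-in for the XSOAR `demisto` global
    @staticmethod
    def error(message):
        print(message)

demisto = _Demisto()

API_KEY = '<your-hibp-api-key>'  # placeholder
BASE_URL = 'https://haveibeenpwned.com/api/v3'
USE_SSL = True  # name assumed; the row shows only `= not demisto.params().get('insecure', False)`
HEADERS = {
    'hibp-api-key': API_KEY,
    'user-agent': 'DBOT-API',
    'Content-Type': 'application/json',
    'Accept': 'application/json'  # value truncated in the row; assumed
}
RETRIES_END_TIME = datetime.now() + timedelta(seconds=60)  # the row derives this from MAX_RETRY_ALLOWED

def http_request(method, url_suffix, params=None, data=None):
    while True:
        res = requests.request(
            method,
            BASE_URL + url_suffix,
            verify=USE_SSL,  # kwarg assumed, see above
            params=params,
            data=data,
            headers=HEADERS
        )
        if res.status_code != 429:  # Rate limit response code
            break
        if datetime.now() > RETRIES_END_TIME:
            return_error('Max retry time has exceeded.')
        # HIBP hints the wait (in seconds) inside the 429 response body
        wait_regex = re.search(r'\d+', res.json()['message'])
        if wait_regex:
            wait_amount = wait_regex.group()
        else:
            demisto.error('failed extracting wait time will use default (5). Res body: {}'.format(res.text))
            wait_amount = 5  # inferred from the "(5)" in the log line
        if datetime.now() + timedelta(seconds=int(wait_amount)) > RETRIES_END_TIME:
            return_error('Max retry time has exceeded.')
        time.sleep(int(wait_amount))
    if res.status_code == 404:
        return None
    if not res.status_code == 200:
        if not res.status_code == 401:
            demisto.error('Error in API call to Pwned Integration [%d]. Full text: %s'
                          % (res.status_code, res.text))
        return_error('Error in API call to Pwned Integration [%d] - %s'
                     % (res.status_code, res.reason))
        return None
    return res.json()
```

The loop retries only on HTTP 429, sleeping for the wait hinted in the response body and bailing out once `RETRIES_END_TIME` has passed; 404 is mapped to `None` so callers can treat "not pwned" as an empty result rather than an error.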
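The same row also carries the context-building half of the integration: a DBotScore record plus a "Pwned-V2" context entry per indicator. This reassembly is more tentative than the one above: the `'Reporters'` key is a guess (the row shows only `', '.join(comp_sites + comp_pastes)` without its key), the body of the `if len(comp_sites) > 0` branch is inferred from data flow because the n-grams drop indentation, and `outputPaths` is stubbed for the CommonServerPython mapping.

```python
VENDOR = 'Have I Been Pwned? V2'
DEFAULT_DBOT_SCORE_DOMAIN = 2  # row: 2 if the 'default_dbot_score_domain' param is 'SUSPICIOUS', else 3
outputPaths = {'domain': 'Domain'}  # stub; the real mapping comes from CommonServerPython

def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score):
    return {
        'Indicator': indicator_value,
        'Type': indicator_type,
        'Vendor': VENDOR,
        'Score': dbot_score
    }

def add_malicious_to_context(malicious_type):
    return {
        'Vendor': VENDOR,
        'Description': 'The ' + malicious_type + ' has been compromised'
    }

def create_context_entry(context_type, context_main_value, comp_sites, comp_pastes, malicious_score):
    context_dict = dict()  # type: dict
    if context_type == 'email':
        context_dict['Address'] = context_main_value
    else:
        context_dict['Name'] = context_main_value
    context_dict['Pwned-V2'] = {
        'Compromised': {
            'Vendor': VENDOR,
            'Reporters': ', '.join(comp_sites + comp_pastes)  # 'Reporters' key assumed
        }
    }
    if malicious_score == 3:
        context_dict['Malicious'] = add_malicious_to_context(context_type)
    return context_dict

def domain_to_entry_context(domain, api_res):
    comp_sites = sorted([item['Title'] for item in api_res])
    comp_domain = dict()  # type: dict
    dbot_score = 0
    if len(comp_sites) > 0:
        # branch body inferred from data flow; the n-grams drop indentation
        dbot_score = DEFAULT_DBOT_SCORE_DOMAIN
        domain_context = create_context_entry('domain', domain, comp_sites, [],
                                              DEFAULT_DBOT_SCORE_DOMAIN)
        comp_domain[outputPaths['domain']] = domain_context
    comp_domain['DBotScore'] = create_dbot_score_dictionary(domain, 'domain', dbot_score)
    return comp_domain
```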
[ "Pipeline(Extension): identifier = \"pipeline\" def __init__(self): self.visitor = PipelineVisitor(self.activation) def", "<reponame>Aloxaf/moshmosh<gh_stars>100-1000 from moshmosh.extension import Extension from moshmosh.ast_compat import ast class", "visit_BinOp(self, n: ast.BinOp): if n.lineno in self.activation and isinstance(n.op, ast.BitOr):", "n.lineno in self.activation and isinstance(n.op, ast.BitOr): return ast.Call( self.visit(n.right), [self.visit(n.left)],", "identifier = \"pipeline\" def __init__(self): self.visitor = PipelineVisitor(self.activation) def rewrite_ast(self,", "from moshmosh.ast_compat import ast class PipelineVisitor(ast.NodeTransformer): \"\"\" `a | f", "| f -> f(a)`, recursively \"\"\" def __init__(self, activation): self.activation", "import ast class PipelineVisitor(ast.NodeTransformer): \"\"\" `a | f -> f(a)`,", "__init__(self, activation): self.activation = activation def visit_BinOp(self, n: ast.BinOp): if", "def __init__(self, activation): self.activation = activation def visit_BinOp(self, n: ast.BinOp):", "in self.activation and isinstance(n.op, ast.BitOr): return ast.Call( self.visit(n.right), [self.visit(n.left)], [],", "ast class PipelineVisitor(ast.NodeTransformer): \"\"\" `a | f -> f(a)`, recursively", "if n.lineno in self.activation and isinstance(n.op, ast.BitOr): return ast.Call( self.visit(n.right),", "= \"pipeline\" def __init__(self): self.visitor = PipelineVisitor(self.activation) def rewrite_ast(self, node):", "[self.visit(n.left)], [], lineno=n.lineno, col_offset=n.col_offset ) return self.generic_visit(n) class Pipeline(Extension): identifier", "def __init__(self): self.visitor = PipelineVisitor(self.activation) def rewrite_ast(self, node): return self.visitor.visit(node)", "= activation def visit_BinOp(self, n: ast.BinOp): if n.lineno in self.activation", "\"\"\" `a | f -> f(a)`, recursively \"\"\" def __init__(self,", "ast.BinOp): if n.lineno in self.activation and isinstance(n.op, ast.BitOr): return ast.Call(", "n: ast.BinOp): if n.lineno in self.activation and isinstance(n.op, ast.BitOr): return", "and isinstance(n.op, ast.BitOr): return ast.Call( self.visit(n.right), [self.visit(n.left)], [], lineno=n.lineno, col_offset=n.col_offset", "\"\"\" def __init__(self, activation): self.activation = activation def visit_BinOp(self, n:", "return ast.Call( self.visit(n.right), [self.visit(n.left)], [], lineno=n.lineno, col_offset=n.col_offset ) return self.generic_visit(n)", "moshmosh.extension import Extension from moshmosh.ast_compat import ast class PipelineVisitor(ast.NodeTransformer): \"\"\"", "activation): self.activation = activation def visit_BinOp(self, n: ast.BinOp): if n.lineno", "moshmosh.ast_compat import ast class PipelineVisitor(ast.NodeTransformer): \"\"\" `a | f ->", "self.activation and isinstance(n.op, ast.BitOr): return ast.Call( self.visit(n.right), [self.visit(n.left)], [], lineno=n.lineno,", "ast.BitOr): return ast.Call( self.visit(n.right), [self.visit(n.left)], [], lineno=n.lineno, col_offset=n.col_offset ) return", "self.generic_visit(n) class Pipeline(Extension): identifier = \"pipeline\" def __init__(self): self.visitor =", "class Pipeline(Extension): identifier = \"pipeline\" def __init__(self): self.visitor = PipelineVisitor(self.activation)", "activation def visit_BinOp(self, n: ast.BinOp): if n.lineno in self.activation and", "recursively \"\"\" def __init__(self, activation): self.activation = activation def visit_BinOp(self,", "lineno=n.lineno, col_offset=n.col_offset ) return self.generic_visit(n) class 
Pipeline(Extension): identifier = \"pipeline\"", "def visit_BinOp(self, n: ast.BinOp): if n.lineno in self.activation and isinstance(n.op,", "self.visit(n.right), [self.visit(n.left)], [], lineno=n.lineno, col_offset=n.col_offset ) return self.generic_visit(n) class Pipeline(Extension):", "f(a)`, recursively \"\"\" def __init__(self, activation): self.activation = activation def", ") return self.generic_visit(n) class Pipeline(Extension): identifier = \"pipeline\" def __init__(self):", "isinstance(n.op, ast.BitOr): return ast.Call( self.visit(n.right), [self.visit(n.left)], [], lineno=n.lineno, col_offset=n.col_offset )", "from moshmosh.extension import Extension from moshmosh.ast_compat import ast class PipelineVisitor(ast.NodeTransformer):", "[], lineno=n.lineno, col_offset=n.col_offset ) return self.generic_visit(n) class Pipeline(Extension): identifier =", "Extension from moshmosh.ast_compat import ast class PipelineVisitor(ast.NodeTransformer): \"\"\" `a |", "-> f(a)`, recursively \"\"\" def __init__(self, activation): self.activation = activation", "ast.Call( self.visit(n.right), [self.visit(n.left)], [], lineno=n.lineno, col_offset=n.col_offset ) return self.generic_visit(n) class", "PipelineVisitor(ast.NodeTransformer): \"\"\" `a | f -> f(a)`, recursively \"\"\" def", "class PipelineVisitor(ast.NodeTransformer): \"\"\" `a | f -> f(a)`, recursively \"\"\"", "col_offset=n.col_offset ) return self.generic_visit(n) class Pipeline(Extension): identifier = \"pipeline\" def", "return self.generic_visit(n) class Pipeline(Extension): identifier = \"pipeline\" def __init__(self): self.visitor", "\"pipeline\" def __init__(self): self.visitor = PipelineVisitor(self.activation) def rewrite_ast(self, node): return", "import Extension from moshmosh.ast_compat import ast class PipelineVisitor(ast.NodeTransformer): \"\"\" `a", "f -> f(a)`, recursively \"\"\" def __init__(self, activation): self.activation =", "`a | f -> f(a)`, recursively \"\"\" def __init__(self, activation):", "self.activation = activation def visit_BinOp(self, n: ast.BinOp): if n.lineno in" ]
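The second row's n-grams overlap enough to recover its whole source file, a moshmosh syntax extension that rewrites `a | f` into the call `f(a)` (the row's `<reponame>`/`<gh_stars>` markers identify it as Aloxaf/moshmosh). A reassembly, assuming the fragments join in source order:

```python
from moshmosh.extension import Extension
from moshmosh.ast_compat import ast

class PipelineVisitor(ast.NodeTransformer):
    """
    `a | f -> f(a)`, recursively
    """
    def __init__(self, activation):
        self.activation = activation

    def visit_BinOp(self, n: ast.BinOp):
        # Rewrite only `|` nodes on lines where the extension is activated
        if n.lineno in self.activation and isinstance(n.op, ast.BitOr):
            return ast.Call(
                self.visit(n.right),
                [self.visit(n.left)],
                [],
                lineno=n.lineno,
                col_offset=n.col_offset
            )
        return self.generic_visit(n)

class Pipeline(Extension):
    identifier = "pipeline"

    def __init__(self):
        self.visitor = PipelineVisitor(self.activation)

    def rewrite_ast(self, node):
        return self.visitor.visit(node)
```

Note the guard: the rewrite fires only for `ast.BitOr` and only on lines registered in `self.activation`, so ordinary bitwise `|` elsewhere in a module is left untouched.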
[ "funcExpectedHash = precompute_hash(r2, funcOffset, funcSize) print(\"funcOffset:{} funcSize:{} funcExpectedHash:{}\".format( funcOffset, funcSize,", "is entry0 in the binary function = 'entry0' print(\"Cannot precompute", "os.system( \"chmod +x {}\".format(patchedBinary)) if status != 0: print(\"Error in", "\"\" for name, attr in funcs.items(): # sometimes r2 prepends", "h = 0 print(\"r2 command to get the function body", "funcs[func['name']] = attr # Basic search for mangled names if", "name, attr in funcs.items(): # sometimes r2 prepends sym. to", "that?\") exit(1) match = 0 mangledName = \"\" for name,", "description='Postpatch protected C program.') parser.add_argument('-b', action=\"store\", dest=\"binary\", help=\"program.out protected program", "C program.') parser.add_argument('-b', action=\"store\", dest=\"binary\", help=\"program.out protected program binary\", required=True)", "funcSize, funcExpectedHash)) binaryFile, _ = os.path.splitext(results.binary) patchedBinary = \"{}-patched.out\".format(binaryFile) copyfile(results.binary,", "program.') parser.add_argument('-b', action=\"store\", dest=\"binary\", help=\"program.out protected program binary\", required=True) parser.add_argument('-f',", "{} for func in function_list: attr = {'size': func['size'], 'offset':", "-1: mm.seek(0) address = mm.find(search_bytes) mm.seek(address, os.SEEK_SET) patch_bytes = struct.pack(flag,", "in setting permission, try:\\n sudo chmod +x {}\".format(patchedBinary)) exit(1) if", "import r2pipe import struct import mmap import base64 from shutil", "hash\", required=True) results = parser.parse_args() print(\"python protect program\", results) r2", "of function, size, expected hash\", required=True) results = parser.parse_args() print(\"python", "print(\"Successfully stored patched binary {}\".format(patchedBinary)) status = os.system( \"chmod +x", "attr in funcs.items(): # sometimes r2 prepends sym. 
to function", "argparse.ArgumentParser( description='Postpatch protected C program.') parser.add_argument('-b', action=\"store\", dest=\"binary\", help=\"program.out protected", "print(\"python protect program\", results) r2 = r2pipe.open(results.binary) funcInfo = get_protected_function_info(r2,", "= \"<I\" # little-endian unsigned int search_bytes = struct.pack(flag, search_value)", "value:{}\".format(search_value, patch_value)) flag = \"<I\" # little-endian unsigned int search_bytes", "for name, attr in funcs.items(): # sometimes r2 prepends sym.", "= os.path.splitext(results.binary) patchedBinary = \"{}-patched.out\".format(binaryFile) copyfile(results.binary, patchedBinary) with open(patchedBinary, 'r+b')", "with open(patchedBinary, 'r+b') as binary: mm = mmap.mmap(binary.fileno(), 0) patch_binary(mm,", "pp = pprint.PrettyPrinter(indent=4) def precompute_hash(r2, offset, size): print('Precomputing hash') h", "binary {}\".format(patchedBinary)) status = os.system( \"chmod +x {}\".format(patchedBinary)) if status", "{'size': func['size'], 'offset': func['offset']} funcs[func['name']] = attr # Basic search", "as binary: mm = mmap.mmap(binary.fileno(), 0) patch_binary(mm, int(results.placeholders[0]), int(funcSize)) patch_binary(mm,", "the binary function = 'entry0' print(\"Cannot precompute the expected hash", "mm.seek(address, os.SEEK_SET) patch_bytes = struct.pack(flag, patch_value) mm.write(patch_bytes) def get_protected_function_info(r2, function):", "+x {}\".format(patchedBinary)) if status != 0: print(\"Error in setting permission,", "struct.pack(flag, patch_value) mm.write(patch_bytes) def get_protected_function_info(r2, function): # find addresses and", "required=True) parser.add_argument('-p', nargs=\"+\", dest=\"placeholders\", help=\"list of used placeholders in the", "to safely find function in the binary!\") pp.pprint(funcs) exit(1) return", "to get the function body in base64:\\np6e {}@{}\".format(size, offset)) b64_func", "int(results.placeholders[0]), int(funcSize)) patch_binary(mm, int(results.placeholders[1]), int(funcExpectedHash)) print(\"Successfully stored patched binary {}\".format(patchedBinary))", "patchedBinary = \"{}-patched.out\".format(binaryFile) copyfile(results.binary, patchedBinary) with open(patchedBinary, 'r+b') as binary:", "print(\"Error in setting permission, try:\\n sudo chmod +x {}\".format(patchedBinary)) exit(1)", "import os import r2pipe import struct import mmap import base64", "= 'entry0' print(\"Cannot precompute the expected hash for the main", "b64_func = r2.cmd(\"p6e {}@{}\".format(size, offset)) func_bytes = bytearray(base64.b64decode(b64_func)) for b", "function is entry0 in the binary function = 'entry0' print(\"Cannot", "funcInfo[\"size\"] funcExpectedHash = precompute_hash(r2, funcOffset, funcSize) print(\"funcOffset:{} funcSize:{} funcExpectedHash:{}\".format( funcOffset,", "find addresses and sizes of all functions r2.cmd(\"aa\") r2.cmd(\"aac\") function_list", "b in func_bytes: h = h ^ b print('Precomuted hash:',", "patch_bytes = struct.pack(flag, patch_value) mm.write(patch_bytes) def get_protected_function_info(r2, function): # find", "parser.parse_args() print(\"python protect program\", results) r2 = r2pipe.open(results.binary) funcInfo =", "results) r2 = r2pipe.open(results.binary) funcInfo = get_protected_function_info(r2, results.function) funcOffset =", "exact order of function, size, expected hash\", required=True) results =", "h = h ^ b print('Precomuted hash:', hex(h)) return h", "funcOffset = funcInfo[\"offset\"] 
funcSize = funcInfo[\"size\"] funcExpectedHash = precompute_hash(r2, funcOffset,", "b print('Precomuted hash:', hex(h)) return h def patch_binary(mm, search_value, patch_value):", "match = 0 mangledName = \"\" for name, attr in", "func['size'], 'offset': func['offset']} funcs[func['name']] = attr # Basic search for", "import mmap import base64 from shutil import copyfile import pprint", "parser.add_argument('-p', nargs=\"+\", dest=\"placeholders\", help=\"list of used placeholders in the exact", "funcSize:{} funcExpectedHash:{}\".format( funcOffset, funcSize, funcExpectedHash)) binaryFile, _ = os.path.splitext(results.binary) patchedBinary", "main function, why is that?\") exit(1) match = 0 mangledName", "help=\"program.out protected program binary\", required=True) parser.add_argument('-f', action=\"store\", dest=\"function\", help=\"protected function", "= precompute_hash(r2, funcOffset, funcSize) print(\"funcOffset:{} funcSize:{} funcExpectedHash:{}\".format( funcOffset, funcSize, funcExpectedHash))", "dest=\"placeholders\", help=\"list of used placeholders in the exact order of", "= mm.find(search_bytes) mm.seek(address, os.SEEK_SET) patch_bytes = struct.pack(flag, patch_value) mm.write(patch_bytes) def", "attr = {'size': func['size'], 'offset': func['offset']} funcs[func['name']] = attr #", "hex(h)) return h def patch_binary(mm, search_value, patch_value): print(\"search value:{} patch", "main function is entry0 in the binary function = 'entry0'", "import struct import mmap import base64 from shutil import copyfile", "= funcInfo[\"offset\"] funcSize = funcInfo[\"size\"] funcExpectedHash = precompute_hash(r2, funcOffset, funcSize)", "action=\"store\", dest=\"binary\", help=\"program.out protected program binary\", required=True) parser.add_argument('-f', action=\"store\", dest=\"function\",", "funcOffset, funcSize, funcExpectedHash)) binaryFile, _ = os.path.splitext(results.binary) patchedBinary = \"{}-patched.out\".format(binaryFile)", "0 mangledName = \"\" for name, attr in funcs.items(): #", "= 0 mangledName = \"\" for name, attr in funcs.items():", "= \"{}-patched.out\".format(binaryFile) copyfile(results.binary, patchedBinary) with open(patchedBinary, 'r+b') as binary: mm", "binary function = 'entry0' print(\"Cannot precompute the expected hash for", "in function_list: attr = {'size': func['size'], 'offset': func['offset']} funcs[func['name']] =", "= {} for func in function_list: attr = {'size': func['size'],", "int(funcSize)) patch_binary(mm, int(results.placeholders[1]), int(funcExpectedHash)) print(\"Successfully stored patched binary {}\".format(patchedBinary)) status", "dest=\"function\", help=\"protected function name\", required=True) parser.add_argument('-p', nargs=\"+\", dest=\"placeholders\", help=\"list of", "offset)) b64_func = r2.cmd(\"p6e {}@{}\".format(size, offset)) func_bytes = bytearray(base64.b64decode(b64_func)) for", "= r2pipe.open(results.binary) funcInfo = get_protected_function_info(r2, results.function) funcOffset = funcInfo[\"offset\"] funcSize", "r2 = r2pipe.open(results.binary) funcInfo = get_protected_function_info(r2, results.function) funcOffset = funcInfo[\"offset\"]", "setting permission, try:\\n sudo chmod +x {}\".format(patchedBinary)) exit(1) if __name__", "for func in function_list: attr = {'size': func['size'], 'offset': func['offset']}", "function): # find addresses and sizes of all functions r2.cmd(\"aa\")", "prepends sym. 
to function names if function in name: mangledName", "print(\"Failed to safely find function in the binary!\") pp.pprint(funcs) exit(1)", "= r2.cmd(\"p6e {}@{}\".format(size, offset)) func_bytes = bytearray(base64.b64decode(b64_func)) for b in", "print(function_list) funcs = {} for func in function_list: attr =", "mangled names if function == 'main': # main function is", "mm.find(search_bytes) if address == -1: mm.seek(0) address = mm.find(search_bytes) mm.seek(address,", "import copyfile import pprint pp = pprint.PrettyPrinter(indent=4) def precompute_hash(r2, offset,", "func['offset']} funcs[func['name']] = attr # Basic search for mangled names", "0: print(\"Error in setting permission, try:\\n sudo chmod +x {}\".format(patchedBinary))", "binary!\") pp.pprint(funcs) exit(1) return funcs[mangledName] def main(): parser = argparse.ArgumentParser(", "r2.cmd(\"aac\") function_list = r2.cmdj(\"aflj\") # print(function_list) funcs = {} for", "return funcs[mangledName] def main(): parser = argparse.ArgumentParser( description='Postpatch protected C", "sizes of all functions r2.cmd(\"aa\") r2.cmd(\"aac\") function_list = r2.cmdj(\"aflj\") #", "import pprint pp = pprint.PrettyPrinter(indent=4) def precompute_hash(r2, offset, size): print('Precomputing", "= 0 print(\"r2 command to get the function body in", "function body in base64:\\np6e {}@{}\".format(size, offset)) b64_func = r2.cmd(\"p6e {}@{}\".format(size,", "mm.write(patch_bytes) def get_protected_function_info(r2, function): # find addresses and sizes of", "mangledName = name match += 1 if match != 1:", "# print(function_list) funcs = {} for func in function_list: attr", "= bytearray(base64.b64decode(b64_func)) for b in func_bytes: h = h ^", "value:{} patch value:{}\".format(search_value, patch_value)) flag = \"<I\" # little-endian unsigned", "used placeholders in the exact order of function, size, expected", "the function body in base64:\\np6e {}@{}\".format(size, offset)) b64_func = r2.cmd(\"p6e", "# little-endian unsigned int search_bytes = struct.pack(flag, search_value) address =", "status != 0: print(\"Error in setting permission, try:\\n sudo chmod", "1 if match != 1: print(\"Failed to safely find function", "placeholders in the exact order of function, size, expected hash\",", "patched binary {}\".format(patchedBinary)) status = os.system( \"chmod +x {}\".format(patchedBinary)) if", "parser.add_argument('-b', action=\"store\", dest=\"binary\", help=\"program.out protected program binary\", required=True) parser.add_argument('-f', action=\"store\",", "command to get the function body in base64:\\np6e {}@{}\".format(size, offset))", "funcs = {} for func in function_list: attr = {'size':", "= struct.pack(flag, patch_value) mm.write(patch_bytes) def get_protected_function_info(r2, function): # find addresses", "base64 from shutil import copyfile import pprint pp = pprint.PrettyPrinter(indent=4)", "binary: mm = mmap.mmap(binary.fileno(), 0) patch_binary(mm, int(results.placeholders[0]), int(funcSize)) patch_binary(mm, int(results.placeholders[1]),", "= {'size': func['size'], 'offset': func['offset']} funcs[func['name']] = attr # Basic", "function names if function in name: mangledName = name match", "def main(): parser = argparse.ArgumentParser( description='Postpatch protected C program.') parser.add_argument('-b',", "def precompute_hash(r2, offset, size): print('Precomputing hash') h = 0 print(\"r2", "# sometimes r2 prepends sym. 
to function names if function", "def patch_binary(mm, search_value, patch_value): print(\"search value:{} patch value:{}\".format(search_value, patch_value)) flag", "0 print(\"r2 command to get the function body in base64:\\np6e", "'r+b') as binary: mm = mmap.mmap(binary.fileno(), 0) patch_binary(mm, int(results.placeholders[0]), int(funcSize))", "binary\", required=True) parser.add_argument('-f', action=\"store\", dest=\"function\", help=\"protected function name\", required=True) parser.add_argument('-p',", "{}\".format(patchedBinary)) status = os.system( \"chmod +x {}\".format(patchedBinary)) if status !=", "permission, try:\\n sudo chmod +x {}\".format(patchedBinary)) exit(1) if __name__ ==", "= argparse.ArgumentParser( description='Postpatch protected C program.') parser.add_argument('-b', action=\"store\", dest=\"binary\", help=\"program.out", "return h def patch_binary(mm, search_value, patch_value): print(\"search value:{} patch value:{}\".format(search_value,", "patch_value) mm.write(patch_bytes) def get_protected_function_info(r2, function): # find addresses and sizes", "copyfile import pprint pp = pprint.PrettyPrinter(indent=4) def precompute_hash(r2, offset, size):", "name\", required=True) parser.add_argument('-p', nargs=\"+\", dest=\"placeholders\", help=\"list of used placeholders in", "mmap.mmap(binary.fileno(), 0) patch_binary(mm, int(results.placeholders[0]), int(funcSize)) patch_binary(mm, int(results.placeholders[1]), int(funcExpectedHash)) print(\"Successfully stored", "binaryFile, _ = os.path.splitext(results.binary) patchedBinary = \"{}-patched.out\".format(binaryFile) copyfile(results.binary, patchedBinary) with", "# main function is entry0 in the binary function =", "funcSize) print(\"funcOffset:{} funcSize:{} funcExpectedHash:{}\".format( funcOffset, funcSize, funcExpectedHash)) binaryFile, _ =", "and sizes of all functions r2.cmd(\"aa\") r2.cmd(\"aac\") function_list = r2.cmdj(\"aflj\")", "funcExpectedHash:{}\".format( funcOffset, funcSize, funcExpectedHash)) binaryFile, _ = os.path.splitext(results.binary) patchedBinary =", "\"chmod +x {}\".format(patchedBinary)) if status != 0: print(\"Error in setting", "print(\"r2 command to get the function body in base64:\\np6e {}@{}\".format(size,", "funcInfo[\"offset\"] funcSize = funcInfo[\"size\"] funcExpectedHash = precompute_hash(r2, funcOffset, funcSize) print(\"funcOffset:{}", "mm.seek(0) address = mm.find(search_bytes) mm.seek(address, os.SEEK_SET) patch_bytes = struct.pack(flag, patch_value)", "protected C program.') parser.add_argument('-b', action=\"store\", dest=\"binary\", help=\"program.out protected program binary\",", "base64:\\np6e {}@{}\".format(size, offset)) b64_func = r2.cmd(\"p6e {}@{}\".format(size, offset)) func_bytes =", "patch_binary(mm, search_value, patch_value): print(\"search value:{} patch value:{}\".format(search_value, patch_value)) flag =", "name: mangledName = name match += 1 if match !=", "if function == 'main': # main function is entry0 in", "print(\"search value:{} patch value:{}\".format(search_value, patch_value)) flag = \"<I\" # little-endian", "sudo chmod +x {}\".format(patchedBinary)) exit(1) if __name__ == '__main__': main()", "name match += 1 if match != 1: print(\"Failed to", "help=\"protected function name\", required=True) parser.add_argument('-p', nargs=\"+\", dest=\"placeholders\", help=\"list of used", "== 'main': # main function is entry0 in the binary", "os import r2pipe import struct import mmap import base64 from", "if function in name: mangledName = name match += 1", 
"r2.cmdj(\"aflj\") # print(function_list) funcs = {} for func in function_list:", "func_bytes: h = h ^ b print('Precomuted hash:', hex(h)) return", "in the binary!\") pp.pprint(funcs) exit(1) return funcs[mangledName] def main(): parser", "stored patched binary {}\".format(patchedBinary)) status = os.system( \"chmod +x {}\".format(patchedBinary))", "os.path.splitext(results.binary) patchedBinary = \"{}-patched.out\".format(binaryFile) copyfile(results.binary, patchedBinary) with open(patchedBinary, 'r+b') as", "in func_bytes: h = h ^ b print('Precomuted hash:', hex(h))", "protected program binary\", required=True) parser.add_argument('-f', action=\"store\", dest=\"function\", help=\"protected function name\",", "func_bytes = bytearray(base64.b64decode(b64_func)) for b in func_bytes: h = h", "expected hash for the main function, why is that?\") exit(1)", "required=True) results = parser.parse_args() print(\"python protect program\", results) r2 =", "exit(1) return funcs[mangledName] def main(): parser = argparse.ArgumentParser( description='Postpatch protected", "# find addresses and sizes of all functions r2.cmd(\"aa\") r2.cmd(\"aac\")", "= attr # Basic search for mangled names if function", "address = mm.find(search_bytes) if address == -1: mm.seek(0) address =", "sometimes r2 prepends sym. to function names if function in", "action=\"store\", dest=\"function\", help=\"protected function name\", required=True) parser.add_argument('-p', nargs=\"+\", dest=\"placeholders\", help=\"list", "'main': # main function is entry0 in the binary function", "funcs[mangledName] def main(): parser = argparse.ArgumentParser( description='Postpatch protected C program.')", "'offset': func['offset']} funcs[func['name']] = attr # Basic search for mangled", "funcs.items(): # sometimes r2 prepends sym. to function names if", "is that?\") exit(1) match = 0 mangledName = \"\" for", "body in base64:\\np6e {}@{}\".format(size, offset)) b64_func = r2.cmd(\"p6e {}@{}\".format(size, offset))", "status = os.system( \"chmod +x {}\".format(patchedBinary)) if status != 0:", "\"<I\" # little-endian unsigned int search_bytes = struct.pack(flag, search_value) address", "size): print('Precomputing hash') h = 0 print(\"r2 command to get", "if status != 0: print(\"Error in setting permission, try:\\n sudo", "^ b print('Precomuted hash:', hex(h)) return h def patch_binary(mm, search_value,", "== -1: mm.seek(0) address = mm.find(search_bytes) mm.seek(address, os.SEEK_SET) patch_bytes =", "function, size, expected hash\", required=True) results = parser.parse_args() print(\"python protect", "shutil import copyfile import pprint pp = pprint.PrettyPrinter(indent=4) def precompute_hash(r2,", "match += 1 if match != 1: print(\"Failed to safely", "results = parser.parse_args() print(\"python protect program\", results) r2 = r2pipe.open(results.binary)", "= mm.find(search_bytes) if address == -1: mm.seek(0) address = mm.find(search_bytes)", "in funcs.items(): # sometimes r2 prepends sym. 
to function names", "function name\", required=True) parser.add_argument('-p', nargs=\"+\", dest=\"placeholders\", help=\"list of used placeholders", "copyfile(results.binary, patchedBinary) with open(patchedBinary, 'r+b') as binary: mm = mmap.mmap(binary.fileno(),", "program\", results) r2 = r2pipe.open(results.binary) funcInfo = get_protected_function_info(r2, results.function) funcOffset", "mm = mmap.mmap(binary.fileno(), 0) patch_binary(mm, int(results.placeholders[0]), int(funcSize)) patch_binary(mm, int(results.placeholders[1]), int(funcExpectedHash))", "the exact order of function, size, expected hash\", required=True) results", "size, expected hash\", required=True) results = parser.parse_args() print(\"python protect program\",", "parser = argparse.ArgumentParser( description='Postpatch protected C program.') parser.add_argument('-b', action=\"store\", dest=\"binary\",", "+= 1 if match != 1: print(\"Failed to safely find", "struct import mmap import base64 from shutil import copyfile import", "h ^ b print('Precomuted hash:', hex(h)) return h def patch_binary(mm,", "get the function body in base64:\\np6e {}@{}\".format(size, offset)) b64_func =", "address == -1: mm.seek(0) address = mm.find(search_bytes) mm.seek(address, os.SEEK_SET) patch_bytes", "all functions r2.cmd(\"aa\") r2.cmd(\"aac\") function_list = r2.cmdj(\"aflj\") # print(function_list) funcs", "flag = \"<I\" # little-endian unsigned int search_bytes = struct.pack(flag,", "search_value, patch_value): print(\"search value:{} patch value:{}\".format(search_value, patch_value)) flag = \"<I\"", "the expected hash for the main function, why is that?\")", "struct.pack(flag, search_value) address = mm.find(search_bytes) if address == -1: mm.seek(0)", "Basic search for mangled names if function == 'main': #", "in name: mangledName = name match += 1 if match", "of used placeholders in the exact order of function, size,", "0) patch_binary(mm, int(results.placeholders[0]), int(funcSize)) patch_binary(mm, int(results.placeholders[1]), int(funcExpectedHash)) print(\"Successfully stored patched", "main(): parser = argparse.ArgumentParser( description='Postpatch protected C program.') parser.add_argument('-b', action=\"store\",", "print(\"Cannot precompute the expected hash for the main function, why", "unsigned int search_bytes = struct.pack(flag, search_value) address = mm.find(search_bytes) if", "names if function in name: mangledName = name match +=", "function in the binary!\") pp.pprint(funcs) exit(1) return funcs[mangledName] def main():", "dest=\"binary\", help=\"program.out protected program binary\", required=True) parser.add_argument('-f', action=\"store\", dest=\"function\", help=\"protected", "protect program\", results) r2 = r2pipe.open(results.binary) funcInfo = get_protected_function_info(r2, results.function)", "addresses and sizes of all functions r2.cmd(\"aa\") r2.cmd(\"aac\") function_list =", "if address == -1: mm.seek(0) address = mm.find(search_bytes) mm.seek(address, os.SEEK_SET)", "\"{}-patched.out\".format(binaryFile) copyfile(results.binary, patchedBinary) with open(patchedBinary, 'r+b') as binary: mm =", "the main function, why is that?\") exit(1) match = 0", "try:\\n sudo chmod +x {}\".format(patchedBinary)) exit(1) if __name__ == '__main__':", "mangledName = \"\" for name, attr in funcs.items(): # sometimes", "!= 1: print(\"Failed to safely find function in the binary!\")", "parser.add_argument('-f', action=\"store\", dest=\"function\", help=\"protected function name\", required=True) 
parser.add_argument('-p', nargs=\"+\", dest=\"placeholders\",", "from shutil import copyfile import pprint pp = pprint.PrettyPrinter(indent=4) def", "= os.system( \"chmod +x {}\".format(patchedBinary)) if status != 0: print(\"Error", "patchedBinary) with open(patchedBinary, 'r+b') as binary: mm = mmap.mmap(binary.fileno(), 0)", "h def patch_binary(mm, search_value, patch_value): print(\"search value:{} patch value:{}\".format(search_value, patch_value))", "if match != 1: print(\"Failed to safely find function in", "attr # Basic search for mangled names if function ==", "program binary\", required=True) parser.add_argument('-f', action=\"store\", dest=\"function\", help=\"protected function name\", required=True)", "function = 'entry0' print(\"Cannot precompute the expected hash for the", "open(patchedBinary, 'r+b') as binary: mm = mmap.mmap(binary.fileno(), 0) patch_binary(mm, int(results.placeholders[0]),", "the binary!\") pp.pprint(funcs) exit(1) return funcs[mangledName] def main(): parser =", "r2 prepends sym. to function names if function in name:", "safely find function in the binary!\") pp.pprint(funcs) exit(1) return funcs[mangledName]", "= funcInfo[\"size\"] funcExpectedHash = precompute_hash(r2, funcOffset, funcSize) print(\"funcOffset:{} funcSize:{} funcExpectedHash:{}\".format(", "get_protected_function_info(r2, results.function) funcOffset = funcInfo[\"offset\"] funcSize = funcInfo[\"size\"] funcExpectedHash =", "address = mm.find(search_bytes) mm.seek(address, os.SEEK_SET) patch_bytes = struct.pack(flag, patch_value) mm.write(patch_bytes)", "bytearray(base64.b64decode(b64_func)) for b in func_bytes: h = h ^ b", "for mangled names if function == 'main': # main function", "patch_value): print(\"search value:{} patch value:{}\".format(search_value, patch_value)) flag = \"<I\" #", "expected hash\", required=True) results = parser.parse_args() print(\"python protect program\", results)", "precompute_hash(r2, funcOffset, funcSize) print(\"funcOffset:{} funcSize:{} funcExpectedHash:{}\".format( funcOffset, funcSize, funcExpectedHash)) binaryFile,", "search for mangled names if function == 'main': # main", "results.function) funcOffset = funcInfo[\"offset\"] funcSize = funcInfo[\"size\"] funcExpectedHash = precompute_hash(r2,", "= get_protected_function_info(r2, results.function) funcOffset = funcInfo[\"offset\"] funcSize = funcInfo[\"size\"] funcExpectedHash", "= \"\" for name, attr in funcs.items(): # sometimes r2", "in the binary function = 'entry0' print(\"Cannot precompute the expected", "search_bytes = struct.pack(flag, search_value) address = mm.find(search_bytes) if address ==", "# Basic search for mangled names if function == 'main':", "int(results.placeholders[1]), int(funcExpectedHash)) print(\"Successfully stored patched binary {}\".format(patchedBinary)) status = os.system(", "funcInfo = get_protected_function_info(r2, results.function) funcOffset = funcInfo[\"offset\"] funcSize = funcInfo[\"size\"]", "match != 1: print(\"Failed to safely find function in the", "in the exact order of function, size, expected hash\", required=True)", "os.SEEK_SET) patch_bytes = struct.pack(flag, patch_value) mm.write(patch_bytes) def get_protected_function_info(r2, function): #", "= name match += 1 if match != 1: print(\"Failed", "for b in func_bytes: h = h ^ b print('Precomuted", "function in name: mangledName = name match += 1 if", "'entry0' print(\"Cannot precompute the expected hash for the main function,", "r2.cmd(\"aa\") r2.cmd(\"aac\") function_list = r2.cmdj(\"aflj\") # 
<reponame>mr-ma/basic-self-checksumming
import argparse
import os
import r2pipe
import struct
import mmap
import base64
from shutil import copyfile
import pprint

pp = pprint.PrettyPrinter(indent=4)


def precompute_hash(r2, offset, size):
    print('Precomputing hash')
    h = 0
    print("r2 command to get the function bytes in base64:\np6e {}@{}".format(size, offset))
    b64_func = r2.cmd("p6e {}@{}".format(size, offset))
    func_bytes = bytearray(base64.b64decode(b64_func))
    for b in func_bytes:
        h = h ^ b
    print('Precomputed hash:', hex(h))
    return h


def patch_binary(mm, search_value, patch_value):
    print("search value:{} patch value:{}".format(search_value, patch_value))
    flag = "<I"  # little-endian unsigned int
    search_bytes = struct.pack(flag, search_value)
    address = mm.find(search_bytes)
    if address == -1:
        mm.seek(0)
        address = mm.find(search_bytes)
    mm.seek(address, os.SEEK_SET)
    patch_bytes = struct.pack(flag, patch_value)
    mm.write(patch_bytes)


def get_protected_function_info(r2, function):
    # find addresses and sizes of all functions
    r2.cmd("aa")
    r2.cmd("aac")
    function_list = r2.cmdj("aflj")
    # print(function_list)
    funcs = {}
    for func in function_list:
        attr = {'size': func['size'], 'offset': func['offset']}
        funcs[func['name']] = attr
    if function == 'main':
        # main function is entry0 in the binary
        function = 'entry0'
        print("Cannot precompute the expected hash for the main function, why is that?")
        exit(1)
    match = 0
    mangledName = ""
    for name, attr in funcs.items():
        # match the sym.-prefixed r2 name to the requested function name
        if function in name:
            mangledName = name
            match += 1
    if match != 1:
        print("Failed to safely find function in the binary!")
        pp.pprint(funcs)
        exit(1)
    return funcs[mangledName]


def main():
    parser = argparse.ArgumentParser(
        description='Postpatch protected binary')
    parser.add_argument('-b', action="store", dest="binary",
                        help="program binary", required=True)
    parser.add_argument('-f', action="store", dest="function",
                        help="protected function name", required=True)
    parser.add_argument('-p', nargs="+", dest="placeholders",
                        help="list of used placeholders in the exact order of function, size, expected hash",
                        required=True)
    results = parser.parse_args()
    print("python protect program", results)

    r2 = r2pipe.open(results.binary)
    funcInfo = get_protected_function_info(r2, results.function)
    funcOffset = funcInfo["offset"]
    funcSize = funcInfo["size"]
    funcExpectedHash = precompute_hash(r2, funcOffset, funcSize)
    print("funcOffset:{} funcSize:{} funcExpectedHash:{}".format(
        funcOffset, funcSize, funcExpectedHash))

    binaryFile, _ = os.path.splitext(results.binary)
    patchedBinary = "{}-patched.out".format(binaryFile)
    copyfile(results.binary, patchedBinary)
    with open(patchedBinary, 'r+b') as binary:
        mm = mmap.mmap(binary.fileno(), 0)
        patch_binary(mm, int(results.placeholders[0]), int(funcSize))
        patch_binary(mm, int(results.placeholders[1]), int(funcExpectedHash))
    print("Successfully stored patched binary {}".format(patchedBinary))
    status = os.system("chmod +x {}".format(patchedBinary))
    if status != 0:
        print("Error in setting permission, try:\n sudo chmod +x {}".format(patchedBinary))
        exit(1)


if __name__ == '__main__':
    main()
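# A small, self-contained illustration of what the two steps above do:
# precompute_hash XOR-folds the protected function's bytes, and patch_binary
# then overwrites a 4-byte little-endian placeholder with the real value.
# The buffer contents and the 0xCAFEBABE placeholder are made up for this
# sketch; they are not taken from the repository above.
import struct

# eight "function" bytes followed by a 4-byte placeholder
buf = bytearray(b"\x01\x02\x03\x04\x05\x06\x07\x08" + struct.pack("<I", 0xCAFEBABE))

# XOR-fold the function bytes, as precompute_hash does over r2's p6e output
h = 0
for b in buf[:8]:
    h ^= b
print("expected hash:", hex(h))          # -> 0x8 for this example

# locate and overwrite the placeholder, as patch_binary does via mmap
placeholder = struct.pack("<I", 0xCAFEBABE)
addr = buf.find(placeholder)
buf[addr:addr + 4] = struct.pack("<I", h)
print(buf.hex())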
# Generated by Django 2.2.15 on 2021-01-29 20:20

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('sitewebapp', '0010_auditionanswers_auditionquestions_audtionrounds_candidates'),
    ]

    operations = [
        migrations.CreateModel(
            name='auditionRounds',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('roundno', models.IntegerField(default=1)),
                ('candidate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
                                                related_name='inductees',
                                                to='sitewebapp.Candidates')),
            ],
        ),
        migrations.AlterField(
            model_name='auditionquestions',
            name='round',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
                                    related_name='round',
                                    to='sitewebapp.auditionRounds'),
        ),
        migrations.DeleteModel(
            name='audtionRounds',
        ),
    ]
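# A hypothetical sketch (not taken from the repo) of the models this
# migration lines up with: it replaces the misspelled audtionRounds model
# with auditionRounds and repoints auditionquestions.round at it. The
# Candidates field shown here is an assumption for illustration only.
from django.db import models


class Candidates(models.Model):
    name = models.CharField(max_length=100)      # assumed field


class auditionRounds(models.Model):
    roundno = models.IntegerField(default=1)
    candidate = models.ForeignKey(Candidates, on_delete=models.CASCADE,
                                  related_name='inductees')


class auditionQuestions(models.Model):
    round = models.ForeignKey(auditionRounds, on_delete=models.CASCADE,
                              related_name='round')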
<reponame>usegalaxy-no/usegalaxy
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2016, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: azure_rm_availabilityset_info

short_description: Get Azure Availability Set facts

description:
    - Get facts for a specific availability set or all availability sets.

options:
    name:
        description:
            - Limit results to a specific availability set.
    resource_group:
        description:
            - The resource group to search for the desired availability set.
    tags:
        description:
            - List of tags to be matched.

extends_documentation_fragment:
- azure.azcollection.azure

author:
    - <NAME> (@julienstroheker)

deprecated:
    removed_in: '2.0.0'
    why: The Ansible collection community.azure is deprecated. Use azure.azcollection instead.
    alternative: Use M(azure.azcollection.azure_rm_availabilityset_info) instead.
'''

EXAMPLES = '''
    - name: Get facts for one availability set
      community.azure.azure_rm_availabilityset_info:
        name: Testing
        resource_group: myResourceGroup

    - name: Get facts for all availability sets in a specific resource group
      community.azure.azure_rm_availabilityset_info:
        resource_group: myResourceGroup
'''

RETURN = '''
azure_availabilityset:
    description: List of availability sets dicts.
    returned: always
    type: complex
    contains:
        location:
            description:
                - Location where the resource lives.
            type: str
            sample: eastus2
        name:
            description:
                - Resource name.
            type: str
            sample: myAvailabilitySet
        properties:
            description:
                - The properties of the resource.
            type: dict
            contains:
                platformFaultDomainCount:
                    description:
                        - Fault Domain count.
                    type: int
                    sample: 3
                platformUpdateDomainCount:
                    description:
                        - Update Domain count.
                    type: int
                    sample: 2
                virtualMachines:
                    description:
                        - A list of references to all virtualmachines in the availability set.
                    type: list
                    sample: []
        sku:
            description:
                - Location where the resource lives.
            type: str
            sample: Aligned
        type:
            description:
                - Resource type.
            type: str
            sample: "Microsoft.Compute/availabilitySets"
        tags:
            description:
                - Resource tags.
            type: dict
            sample: { env: sandbox }
'''

from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase

try:
    from msrestazure.azure_exceptions import CloudError
except Exception:
    # handled in azure_rm_common
    pass

AZURE_OBJECT_CLASS = 'AvailabilitySet'


class AzureRMAvailabilitySetInfo(AzureRMModuleBase):
    """Utility class to get availability set facts"""

    def __init__(self):

        self.module_args = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list')
        )

        self.results = dict(
            changed=False,
            ansible_info=dict(
                azure_availabilitysets=[]
            )
        )

        self.name = None
        self.resource_group = None
        self.tags = None

        super(AzureRMAvailabilitySetInfo, self).__init__(
            derived_arg_spec=self.module_args,
            supports_tags=False,
            facts_module=True
        )

    def exec_module(self, **kwargs):

        is_old_facts = self.module._name == 'azure_rm_availabilityset_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_availabilityset_facts' module has been renamed to 'azure_rm_availabilityset_info'",
                                  version='3.0.0', collection_name='community.azure')  # was 2.13

        for key in self.module_args:
            setattr(self, key, kwargs[key])

        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")

        if self.name:
            self.results['ansible_info']['azure_availabilitysets'] = self.get_item()
        else:
            self.results['ansible_info']['azure_availabilitysets'] = self.list_items()

        return self.results

    def get_item(self):
        """Get a single availability set"""

        self.log('Get properties for {0}'.format(self.name))

        item = None
        result = []

        try:
            item = self.compute_client.availability_sets.get(self.resource_group, self.name)
        except CloudError:
            pass

        if item and self.has_tags(item.tags, self.tags):
            avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
            avase['name'] = item.name
            avase['type'] = item.type
            avase['sku'] = item.sku.name
            result = [avase]

        return result

    def list_items(self):
        """Get all availability sets"""

        self.log('List all availability sets')

        try:
            response = self.compute_client.availability_sets.list(self.resource_group)
        except CloudError as exc:
            self.fail('Failed to list all items - {0}'.format(str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
                avase['name'] = item.name
                avase['type'] = item.type
                avase['sku'] = item.sku.name
                results.append(avase)

        return results


def main():
    """Main module execution code path"""

    AzureRMAvailabilitySetInfo()


if __name__ == '__main__':
    main()
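# A hypothetical sketch of what one entry of the returned
# ansible_info.azure_availabilitysets list looks like (the values mirror the
# samples in RETURN above) and how a caller might post-process it in Python.
# The example_result dict is illustrative, not real API output.
example_result = {
    "location": "eastus2",
    "name": "myAvailabilitySet",
    "properties": {
        "platformFaultDomainCount": 3,
        "platformUpdateDomainCount": 2,
        "virtualMachines": [],
    },
    "sku": "Aligned",
    "type": "Microsoft.Compute/availabilitySets",
    "tags": {"env": "sandbox"},
}

# e.g. keep only Aligned (managed-disk) availability sets in a given region
aligned_in_eastus2 = [
    s["name"]
    for s in [example_result]
    if s["sku"] == "Aligned" and s["location"] == "eastus2"
]
print(aligned_in_eastus2)   # -> ['myAvailabilitySet']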
[ "for state to get to active\") time.sleep(.5) nodes = client.list_node(uuid=uuid).data", "timeout=DEFAULT_TIMEOUT): start = time.time() time.sleep(2) nss = client.list_namespace(uuid=ns.uuid).data assert len(nss)", "p_client.list_workload(uuid=workload.uuid).data[0].paused assert workloadStatus == expectedstatus def validate_pod_images(expectedimage, workload, ns_name): for", "{1} > {0}'\".format(filename, content) output = kubectl_pod_exec(pod, cmd_write) assert output.strip().decode('utf-8')", "expected) def validate_dns_entry(pod, host, expected): # requires pod with `dig`", "else: cluster_token = create_custom_host_registration_token(client, cluster) cmd = cluster_token.nodeCommand for role", "\" + ns_name pods_result = execute_kubectl_cmd(get_pods) assert len(pods_result[\"items\"]) == pod_count", "if nodes[0].nodeTemplateId is None: return \"Custom\" for cluster_config in cluster_configs:", "wl def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT, state=\"error\"): start = time.time() workloads", "if len(host) > 0: curl_args += \" --header 'Host: \"", "def assign_members_to_project(client, user, project, role_template_id): prtb = client.create_project_role_template_binding( projectId=project.id, roleTemplateId=role_template_id,", "assert cs[\"conditions\"][0][\"type\"] == \"Healthy\" assert len(components) == 0 def validate_dns_record(pod,", "client.list_cluster(name=CLUSTER_NAME).data assert len(clusters) > 0 cluster = clusters[0] return client,", "\\ \" Expected: \" + expected_k8s_version def check_cluster_state(etcd_count): css_resp =", "if ns_name is None: ns_name = random_name() ns = client.create_namespace(name=ns_name,", "= get_admin_client() nodes = client.list_node(clusterId=cluster.id).data for node in nodes: if", "+ ADMIN_TOKEN + \"'\\n\" env_details += \"env.CLUSTER_NAME='\" + cluster.name +", "workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused assert workloadStatus == expectedstatus def validate_pod_images(expectedimage, workload,", "2 try: obj = client.reload(obj) except ApiError as e: if", "assert pod[\"status\"][\"phase\"] == \"Running\" return pods_result[\"items\"] def validate_workload_with_sidekicks(p_client, workload, type,", "obj, state, timeout=DEFAULT_TIMEOUT): wait_for(lambda: client.reload(obj).state == state, timeout) return client.reload(obj)", "def create_project_and_ns(token, cluster, project_name=None, ns_name=None): client = get_client_for_token(token) p =", "workload.transitioningMessage def validate_workload(p_client, workload, type, ns_name, pod_count=1, wait_for_cron_pods=60): workload =", "len(nss) == 1 ns = nss[0] return ns def wait_for_pod_images(p_client,", "+ \"/name.html\", target_name_list) def validate_nodePort(p_client, workload, cluster): source_port = workload.publicEndpoints[0][\"port\"]", "inspect.getsource(check_function) if fail_handler: exceptionMsg = exceptionMsg + fail_handler(resource) raise Exception(exceptionMsg)", "target_name_list def get_endpoint_url_for_workload(p_client, workload, timeout=600): fqdn_available = False url =", "list:\" + str(target_name_list)) for node in nodes: host_ip = node.externalIpAddress", "assert len(workload_list) == 1 workload = workload_list[0] if hasattr(workload, 'publicEndpoints'):", "while not check_function(resource): if time.time() - start > timeout: exceptionMsg", "wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT): start = time.time() time.sleep(2) list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data", 
"hasattr(cluster, 'rancherKubernetesEngineConfig'): check_cluster_state(len(get_role_nodes(cluster, \"etcd\"))) project, ns = create_project_and_ns(ADMIN_TOKEN, cluster) p_client", "get to active\") time.sleep(.5) nodes = client.list_node(clusterId=cluster.id).data node_count = len(nodes)", "for node in nodes: ip_list.append(node.externalIpAddress) assert len(ip_filter) > 0 print(ip_filter)", "== 1 ns = nss[0] while ns.state != \"active\": if", "= cluster.links['self'] + '/schemas' c_client = rancher.Client(url=c_url, token=token, verify=False) return", "assert len(pods_result[\"items\"]) == pod_count for pod in pods_result[\"items\"]: assert pod[\"status\"][\"phase\"]", "+ \" -- \" + cmd return execute_kubectl_cmd(command, json_out=False, stderr=True)", "if client_pod is None: curl_cmd = \"curl \" + cmd", "time.sleep(5) p = wait_until_available(client, p) assert p.state == 'active' return", "client.list_node(clusterId=cluster.id).data node_count = len(nodes) def get_custom_host_registration_cmd(client, cluster, roles, node): allowed_roles", "lambda x: x.state == intermediate_state, lambda x: 'State is: '", "requires pod with `dig` available - TEST_IMAGE host = '{0}.{1}.svc.cluster.local'.format(", "def wait_for_condition(client, resource, check_function, fail_handler=None, timeout=DEFAULT_TIMEOUT): start = time.time() resource", "pods -l\" + label + \" -n \" + ns_name", "assert len(ingresses) == 1 wl = ingresses[0] return wl def", "CATTLE_TEST_URL + \"/v3\" kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"k8s_kube_config\") MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT',", "mapp.state != \"active\": print(mapp.uuid) print(mapp.state) if time.time() - start >", "time.time() for key, value in workload.workloadLabels.items(): label = key +", "return cluster_token def get_cluster_type(client, cluster): cluster_configs = [ \"amazonElasticContainerServiceConfig\", \"azureKubernetesServiceConfig\",", "\"Timed out waiting for endpoint to be available\") time.sleep(.5) ingress_list", "pods[\"items\"]: assert pod[\"spec\"][\"containers\"][0][\"image\"] == expectedimage def validate_pods_are_running_by_id(expectedpods, workload, ns_name): for", "from \" + pod1.name + \":\" + str(response)) if allow_connectivity:", "timeout=MACHINE_TIMEOUT) assert cluster.state == intermediate_state cluster = wait_for_condition( client, cluster,", "\"'\\n\" env_details += \"env.CLUSTER_NAME='\" + cluster.name + \"'\\n\" create_config_file(env_details) def", "project = wait_until_available(client, project) assert project.state == 'active' return project", "in pods: target_name_list.append(pod[\"name\"]) curl_cmd = \"http://\" + cluster_ip + \"/name.html\"", "mapp = mcapps[0] print(mapp.state) while mapp.state != \"active\": print(mapp.uuid) print(mapp.state)", "fqdn_available: if time.time() - start > timeout: raise AssertionError( \"Timed", "pods = execute_kubectl_cmd(get_pods) pod = pods[\"items\"][x] podimage = pod[\"spec\"][\"containers\"][0][\"image\"] def", "raise Exception('Timed out waiting for LB to become active') return", "validate_pod_images(expectedImage, workload, ns.name) def execute_kubectl_cmd(cmd, json_out=True, stderr=False): command = 'kubectl", "'/schemas' p_client = rancher.Client(url=p_url, token=token, verify=False) return p_client def get_cluster_client_for_token(cluster,", "def validate_cluster(client, cluster, intermediate_state=\"provisioning\", check_intermediate_state=True, skipIngresscheck=True, nodes_not_in_active_state=[], k8s_version=\"\"): 
cluster =", "output = subprocess.check_output(command, shell=True, stderr=subprocess.PIPE) returncode = 0 except subprocess.CalledProcessError", "exec_shell_command(ip, port, cmd, password): ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(ip, username=\"root\",", "set_pspt_for_project(p, client, pspt) def set_pspt_for_project(project, client, pspt): project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id) project =", "ns = client.create_namespace(name=ns_name, clusterId=cluster.id, projectId=project.id) wait_for_ns_to_become_active(client, ns) ns = client.reload(ns)", "\"deployment\" or type == \"statefulSet\": assert wl_result[\"status\"][\"readyReplicas\"] == pod_count if", "clusters[0] return client, cluster def validate_cluster_state(client, cluster, check_intermediate_state=True, intermediate_state=\"provisioning\", nodes_not_in_active_state=[]):", "+ \\ ' to satisfy condition: ' + \\ inspect.getsource(check_function)", "= time.time() workloads = client.list_workload(uuid=workload.uuid).data assert len(workloads) == 1 wl", "start = time.time() workloads = client.list_workload(uuid=workload.uuid).data assert len(workloads) == 1", "execute_kubectl_cmd(get_pods) for x in range(0, numofpods - 1): pod =", "workloads[0] while wl.transitioning != state: if time.time() - start >", "cluster, roles, node): allowed_roles = [\"etcd\", \"worker\", \"controlplane\"] cluster_tokens =", "http://\" + host_ip + \":\" + \\ str(source_port) + \"/name.html\"", "url = get_endpoint_url_for_workload(p_client, workload) target_name_list = get_target_names(p_client, [workload]) wait_until_lb_is_active(url) validate_http_response(url", "+ \" -n \" + pod.namespaceId + \" -- \"", "for endpoint to be available\") time.sleep(.5) ingress_list = p_client.list_ingress(uuid=ingress.uuid).data assert", "verify=False) return c_client def up(cluster, token): c_url = cluster.links['self'] +", "control_nodes if role == \"worker\": node_list = worker_nodes return node_list", "to active\") time.sleep(.5) apps = client.list_app(name=app_id).data assert len(apps) == 1", "+ ns_name) assert wl_result[\"status\"][\"readyReplicas\"] == pod_count for key, value in", "token): c_url = cluster.links['self'] + '/schemas' c_client = rancher.Client(url=c_url, token=token,", "random_name(): return \"test\" + \"-\" + str(random_int(10000, 99999)) def create_project_and_ns(token,", "active\") time.sleep(.5) nss = client.list_namespace(uuid=ns.uuid).data assert len(nss) == 1 ns", "x in range(0, numofpods - 1): pod = pods[\"items\"][x] podimage", "url = url + str(workload.publicEndpoints[0][\"port\"]) fqdn_available = True return url", "check_connectivity_between_pods(pod1, pod2, allow_connectivity=True): pod_ip = pod2.status.podIp cmd = \"ping -c", "cluster.state == \"active\" wait_for_nodes_to_become_active(client, cluster, exception_list=nodes_not_in_active_state) return cluster def wait_until_available(client,", "def delete_node(aws_nodes): for node in aws_nodes: AmazonWebServices().delete_node(node) def cluster_cleanup(client, cluster,", "= client.reload(ns) assert ns.state == 'active' return ns def assign_members_to_cluster(client,", "lambda x: x.state == \"active\", lambda x: 'State is: '", "cluster_k8s_version = \\ cluster.appliedSpec[\"rancherKubernetesEngineConfig\"][ \"kubernetesVersion\"] assert cluster_k8s_version == version, \\", "from rancher import ApiError from lib.aws import AmazonWebServices DEFAULT_TIMEOUT =", "label = key + \"=\" + value get_pods = \"get", 
"300 CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', \"http://localhost:80\") ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', \"None\") CATTLE_API_URL", "type == \"daemonSet\": assert wl_result[\"status\"][\"currentNumberScheduled\"] == pod_count if type ==", "prtb, roleTemplateId=role_template_id, userId=user.id) return prtb def create_kubeconfig(cluster): generateKubeConfigOutput = cluster.generateKubeconfig()", "raise e else: return obj delta = time.time() - start", "start = time.time() time.sleep(2) nss = client.list_namespace(uuid=ns.uuid).data assert len(nss) ==", "ns_name pods = execute_kubectl_cmd(get_pods) for pod in pods[\"items\"]: assert pod[\"spec\"][\"containers\"][0][\"image\"]", "if time.time() - start > timeout: raise Exception('Timed out waiting", "CLUSTER_NAME == \"\": clusters = client.list_cluster().data else: clusters = client.list_cluster(name=CLUSTER_NAME).data", "len(pods) == 1 p = pods[0] return p def get_schedulable_nodes(cluster):", "def up(cluster, token): c_url = cluster.links['self'] + '/schemas' c_client =", "\" + cluster_k8s_version + \\ \" Expected: \" + version", "- 1): pod = pods[\"items\"][x] podimage = pod[\"spec\"][\"containers\"][0][\"image\"] while podimage", "return 'random-{0}-{1}'.format(random_num(), int(time.time())) def random_num(): return random.randint(0, 1000000) def random_int(start,", "after # scheduled wait time if type == \"cronJob\": time.sleep(wait_for_cron_pods)", "def run_command_with_stderr(command): try: output = subprocess.check_output(command, shell=True, stderr=subprocess.PIPE) returncode =", "schedulable_nodes def get_role_nodes(cluster, role): etcd_nodes = [] control_nodes = []", "workloads) for node in nodes: host_ip = node.externalIpAddress cmd =", "= workload_list[0] if hasattr(workload, 'publicEndpoints'): assert len(workload.publicEndpoints) > 0 url", "= False for node in nodes: if node.requestedHostname not in", "time.time() nodes = client.list_node(uuid=uuid).data node_count = len(nodes) # Handle the", "\"amazonElasticContainerServiceConfig\", \"azureKubernetesServiceConfig\", \"googleKubernetesEngineConfig\", \"rancherKubernetesEngineConfig\" ] if \"rancherKubernetesEngineConfig\" in cluster: nodes", "= AmazonWebServices().get_nodes(filters) for node in aws_nodes: print(node.public_ip_address) AmazonWebServices().delete_nodes(aws_nodes) def check_connectivity_between_workloads(p_client1,", "allow_connectivity) def check_connectivity_between_workload_pods(p_client, workload): pods = p_client.list_pod(workloadId=workload.id).data for pod in", "timeout: raise AssertionError( \"Timed out waiting for state to get", "while node_status != state: if time.time() - start > MACHINE_TIMEOUT:", "def create_project_with_pspt(client, cluster, pspt): p = client.create_project(name=random_name(), clusterId=cluster.id) p =", "= kubectl_pod_exec(client_pod, wget_cmd) result = result.decode() result = result.rstrip() print(\"cmd:", "def validate_dns_entry(pod, host, expected): # requires pod with `dig` available", "time.sleep(.5) ingresses = client.list_ingress(uuid=ingress.uuid).data assert len(ingresses) == 1 wl =", "verify=False) def get_project_client_for_token(project, token): p_url = project.links['self'] + '/schemas' p_client", "pod2.status.podIp cmd = \"ping -c 1 -W 1 \" +", "timeout_message: raise Exception(timeout_message) else: raise Exception('Timeout waiting for condition') ret", "in str(response) and \" 0% packet loss\" in str(response) else:", "active or not wait_for_mcapp_cluster_level_to_active(p_client1, app_id1) if 
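wait_for() and wait_for_condition() are the polling primitives everything below builds on. A hypothetical usage sketch follows; demo_wait_for_usage and its arguments are illustrative, not part of the suite.

def demo_wait_for_usage(client, workload):
    # The callback takes no arguments and stays falsy (None/False) until
    # the condition holds; wait_for() polls it every .5s and raises once
    # DEFAULT_TIMEOUT elapses.
    return wait_for(lambda: client.reload(workload).state == "active",
                    timeout_message="workload never reached active state")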
app_id2 != \"\": wait_for_mcapp_cluster_level_to_active(p_client2,", "cluster: return cluster_config return \"Imported\" def delete_cluster(client, cluster): nodes =", "'/schemas' c_client = rancher.Client(url=c_url, token=token, verify=False) return c_client def up(cluster,", "= wait_for_pods_in_workload(p_client, workload, pod_count) assert len(pods) == pod_count for pod", "execute_kubectl_cmd(get_pods) for pod in pods[\"items\"]: assert pod[\"spec\"][\"containers\"][0][\"image\"] == expectedimage def", "kubectl_pod_exec(pod, dig_cmd) for expected_value in expected: assert expected_value in str(dig_output)", "try: output = subprocess.check_output(command, shell=True, stderr=subprocess.PIPE) returncode = 0 except", "*= 2 if sleep > 2: sleep = 2 try:", "ns_id, pvc_name, wl_name, mount_path, sub_path, is_daemonSet=False): volumes = [{\"type\": \"volume\",", "p_client.list_app(name=app_id).data assert len(mcapp) == 1 app = mcapp[0] return app", "nodes = client.list_node(uuid=uuid).data node_count = len(nodes) if node_count == 1:", "assert workload.state == \"active\" pods = wait_for_pods_in_workload(p_client, workload, pod_count) assert", "if e.error.status != 403: raise e else: return obj delta", "+ \" -n \" + ns_name pods = execute_kubectl_cmd(get_pods) for", "json_out=True, stderr=False): command = 'kubectl --kubeconfig {0} {1}'.format( kube_fname, cmd)", "validate_cluster_state(client, cluster, check_intermediate_state=True, intermediate_state=\"provisioning\", nodes_not_in_active_state=[]): if check_intermediate_state: cluster = wait_for_condition(", "ingresses[0] while wl.state != \"active\": if time.time() - start >", "\"env.CLUSTER_NAME='\" + cluster.name + \"'\\n\" create_config_file(env_details) def create_config_file(env_details): file =", "check_cluster_state(len(get_role_nodes(cluster, \"etcd\"))) project, ns = create_project_and_ns(ADMIN_TOKEN, cluster) p_client = get_project_client_for_token(project,", "= p_client.list_pod(workloadId=workload.id).data for pod in pods: for o_pod in pods:", "retry_count += 1 print(\"Retry Count:\" + str(retry_count)) if node_auto_deleted and", "False for expected_value in expected: if expected_value in str(ping_output): ping_validation_pass", "wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT): start = time.time() workloads = client.list_workload(uuid=workload.uuid).data assert", "json_out: result = json.loads(result) print(result) return result def run_command(command): return", "= open(kube_fname, \"w\") file.write(generateKubeConfigOutput.config) file.close() def validate_psp_error_worklaod(p_client, workload, error_message): workload", "\"name\": \"vol1\", \"persistentVolumeClaim\": { \"readOnly\": \"false\", \"type\": \"persistentVolumeClaimVolumeSource\", \"persistentVolumeClaimId\": pvc_name", "validate_http_response(curl_cmd, target_name_list) def validate_lb(p_client, workload): url = get_endpoint_url_for_workload(p_client, workload) target_name_list", "assert cluster.state == intermediate_state cluster = wait_for_condition( client, cluster, lambda", "with `dig` available - TEST_IMAGE host = '{0}.{1}.svc.cluster.local'.format( record[\"name\"], record[\"namespaceId\"])", "skipIngresscheck=True, nodes_not_in_active_state=[], k8s_version=\"\"): cluster = validate_cluster_state( client, cluster, check_intermediate_state=check_intermediate_state, intermediate_state=intermediate_state,", "workload = wait_for_wl_transitioning(p_client, workload) assert workload.state == \"updating\" assert workload.transitioning", "break if 
client_pod is None: curl_cmd = \"curl \" +", "file.close() def validate_hostPort(p_client, workload, source_port, cluster): pods = p_client.list_pod(workloadId=workload.id).data nodes", "ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(ip, username=\"root\", password=password, port=port) stdin, stdout, stderr = ssh.exec_command(cmd)", "str(response) and \" 100% packet loss\" in str(response) def kubectl_pod_exec(pod,", "def random_str(): return 'random-{0}-{1}'.format(random_num(), int(time.time())) def random_num(): return random.randint(0, 1000000)", "cluster.state == intermediate_state cluster = wait_for_condition( client, cluster, lambda x:", "cronjob, wait for the first pod to get created after", "time.sleep(sleep) sleep *= 2 if sleep > 2: sleep =", "\\ \" Expected: \" + version expected_k8s_version = version[:version.find(\"-\")] k8s_version", "port=port) stdin, stdout, stderr = ssh.exec_command(cmd) response = stdout.readlines() return", "0 except subprocess.CalledProcessError as e: output = e.output returncode =", "create_custom_host_registration_token(client, cluster): cluster_token = client.create_cluster_registration_token( clusterId=cluster.id) cluster_token = client.wait_success(cluster_token) assert", "1 pv = list[0] while pv.state != \"available\": if time.time()", "source_port = workload.publicEndpoints[0][\"port\"] nodes = get_schedulable_nodes(cluster) pods = p_client.list_pod(workloadId=workload.id).data target_name_list", "ingress = ingress_list[0] if hasattr(ingress, 'publicEndpoints'): for public_endpoint in ingress.publicEndpoints:", "p = pods[0] return p def get_schedulable_nodes(cluster): client = get_admin_client()", "project_name=None): if project_name is None: project_name = random_name() p =", "schedulable_nodes.append(node) return schedulable_nodes def get_role_nodes(cluster, role): etcd_nodes = [] control_nodes", "\"persistentVolumeClaimId\": pvc_name }}] volumeMounts = [{\"readOnly\": \"False\", \"type\": \"volumeMount\", \"mountPath\":", "components.remove(component_name) assert cs[\"conditions\"][0][\"status\"] == \"True\" assert cs[\"conditions\"][0][\"type\"] == \"Healthy\" assert", "+ ns_name pods = execute_kubectl_cmd(get_pods) for pod in pods[\"items\"]: assert", "pod = pods[\"items\"][x] podimage = pod[\"spec\"][\"containers\"][0][\"image\"] while podimage != expectedimage:", "+ resource.baseType + \\ ' to satisfy condition: ' +", "\"True\" assert cs[\"conditions\"][0][\"type\"] == \"Healthy\" assert len(components) == 0 def", "== \"Healthy\" assert len(components) == 0 def validate_dns_record(pod, record, expected):", "== \"Running\" assert len(pod[\"status\"][\"containerStatuses\"]) == 2 assert \"running\" in pod[\"status\"][\"containerStatuses\"][0][\"state\"]", "'publicEndpoints'): assert len(workload.publicEndpoints) > 0 url = \"http://\" url =", "= \\ 'network-interface.addresses.association.public-ip' ip_filter['Values'] = ip_list filters.append(ip_filter) for node in", "cmd_get_content = \"/bin/bash -c 'cat {0}' \".format(filename) output = kubectl_pod_exec(pod,", "wait_for_node_to_be_deleted(client, node, timeout=300): uuid = node.uuid start = time.time() nodes", "RANCHER_CLEANUP_CLUSTER: client.delete(cluster) if aws_nodes is not None: delete_node(aws_nodes) else: env_details", "aws_nodes: AmazonWebServices().delete_node(node) def cluster_cleanup(client, cluster, aws_nodes=None): if RANCHER_CLEANUP_CLUSTER: client.delete(cluster) if", "p) assert p.state == 'active' return p def create_project_with_pspt(client, 
cluster,", "for correct pod images\") time.sleep(.5) pods = execute_kubectl_cmd(get_pods) pod =", "def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT): wait_for(lambda: client.reload(obj).state == state, timeout)", "clusterId=cluster.id) cluster_token = client.wait_success(cluster_token) assert cluster_token.state == 'active' return cluster_token", "!= \"bound\": if time.time() - start > timeout: raise AssertionError(", "pvc def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name, mount_path, sub_path, is_daemonSet=False): volumes", "client.reload(obj).state == state, timeout) return client.reload(obj) def wait_for_condition(client, resource, check_function,", "expected_value in str(ping_output): ping_validation_pass = True break assert ping_validation_pass is", "cluster): nodes = client.list_node(clusterId=cluster.id).data # Delete Cluster client.delete(cluster) # Delete", "time.time() time.sleep(2) list = c_client.list_persistent_volume(uuid=pv_object.uuid).data assert len(list) == 1 pv", "roles: assert role in allowed_roles cmd += \" --\" +", "def wait_for_node_status(client, node, state): uuid = node.uuid start = time.time()", "0: cluster_token = cluster_tokens[0] else: cluster_token = create_custom_host_registration_token(client, cluster) cmd", "\" Expected: \" + expected_k8s_version def check_cluster_state(etcd_count): css_resp = execute_kubectl_cmd(\"get", "> 0: if nodes[0].nodeTemplateId is None: return \"Custom\" for cluster_config", "ns_name is None: ns_name = random_name() ns = client.create_namespace(name=ns_name, clusterId=cluster.id,", "client.list_workload(uuid=workload.uuid).data assert len(workloads) == 1 wl = workloads[0] return wl", "all, the rest is: \", target_hit_list) assert len(target_hit_list) == 0", "is not None: delete_node(aws_nodes) else: env_details = \"env.CATTLE_TEST_URL='\" + CATTLE_TEST_URL", "pod with `dig` available - TEST_IMAGE cmd = 'ping -c", "= mcapp[0] return app def wait_for_mcapp_cluster_level_to_active(client, app_id, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT): mcapps =", "public_endpoint in ingress.publicEndpoints: if public_endpoint[\"hostname\"].startswith(ingress.name): fqdn_available = True url =", "waiting for endpoint to be available\") time.sleep(.5) workload_list = p_client.list_workload(uuid=workload.uuid).data", "def check_connectivity_between_workloads(p_client1, workload1, p_client2, workload2, allow_connectivity=True): wl1_pods = p_client1.list_pod(workloadId=workload1.id).data wl2_pods", "url = \\ public_endpoint[\"protocol\"].lower() + \"://\" + \\ public_endpoint[\"hostname\"] if", "-n \" + ns_name pods = execute_kubectl_cmd(get_pods) curpodnames = []", "return set_pspt_for_project(p, client, pspt) def set_pspt_for_project(project, client, pspt): project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id) project", "out waiting for state to get to available\") time.sleep(.5) list", "AmazonWebServices().get_nodes(filters) for node in aws_nodes: print(node.public_ip_address) AmazonWebServices().delete_nodes(aws_nodes) def check_connectivity_between_workloads(p_client1, workload1,", "\\ inspect.getsource(check_function) if fail_handler: exceptionMsg = exceptionMsg + fail_handler(resource) raise", "expected_k8s_version def check_cluster_state(etcd_count): css_resp = execute_kubectl_cmd(\"get cs\") css = css_resp[\"items\"]", "wl.state != \"active\": if time.time() - start > timeout: raise", "pod in test_pods: validate_http_response(curl_cmd, target_name_list, pod) def wait_for_pv_to_be_available(c_client, 
pv_object, timeout=DEFAULT_TIMEOUT):", "def random_num(): return random.randint(0, 1000000) def random_int(start, end): return random.randint(start,", "\\ ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', \"True\")) env_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"rancher_env.config\") CLUSTER_NAME_2 =", "[] for workload in workloads: pod_list = p_client.list_pod(workloadId=workload.id).data pods.extend(pod_list) target_name_list", "record[\"namespaceId\"]) validate_dns_entry(pod, host, expected) def validate_dns_entry(pod, host, expected): # requires", "time import requests import ast import paramiko import rancher from", "client.list_node(clusterId=cluster.id).data schedulable_nodes = [] for node in nodes: if node.worker:", "public_endpoint[\"hostname\"].startswith(ingress.name): fqdn_available = True url = \\ public_endpoint[\"protocol\"].lower() + \"://\"", "time if type == \"cronJob\": time.sleep(wait_for_cron_pods) pods = p_client.list_pod(workloadId=workload.id).data assert", "client = get_admin_client() if CLUSTER_NAME == \"\" or CLUSTER_NAME_2 ==", "\" -n \" + ns_name pods = execute_kubectl_cmd(get_pods) for x", "= 300 CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', \"http://localhost:80\") ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', \"None\")", "validate_mcapp_cluster(app_id2, p_client2) # verify app in cluster is active or", "p, ns def create_project(client, cluster, project_name=None): if project_name is None:", "k8s_version) if hasattr(cluster, 'rancherKubernetesEngineConfig'): check_cluster_state(len(get_role_nodes(cluster, \"etcd\"))) project, ns = create_project_and_ns(ADMIN_TOKEN,", "cluster = wait_for_condition( client, cluster, lambda x: x.state == \"active\",", "+ cluster_k8s_version + \\ \" Expected: \" + version expected_k8s_version", "node in nodes: if node.etcd: etcd_nodes.append(node) if node.controlPlane: control_nodes.append(node) if", "ns = create_project_and_ns(ADMIN_TOKEN, cluster) p_client = get_project_client_for_token(project, ADMIN_TOKEN) con =", "\"\": clusters = client.list_cluster().data else: clusters = client.list_cluster(name=CLUSTER_NAME).data assert len(clusters)", "except subprocess.CalledProcessError as e: output = e.output returncode = e.returncode", "== 'active' return set_pspt_for_project(p, client, pspt) def set_pspt_for_project(project, client, pspt):", "= [{\"type\": \"volume\", \"name\": \"vol1\", \"persistentVolumeClaim\": { \"readOnly\": \"false\", \"type\":", "css = css_resp[\"items\"] components = [\"scheduler\", \"controller-manager\"] for i in", "timeout: exceptionMsg = 'Timeout waiting for ' + resource.baseType +", "= cluster_token.nodeCommand for role in roles: assert role in allowed_roles", "node_count = len(nodes) def wait_for_cluster_node_count(client, cluster, expected_node_count, timeout=300): start =", "version): cluster_k8s_version = \\ cluster.appliedSpec[\"rancherKubernetesEngineConfig\"][ \"kubernetesVersion\"] assert cluster_k8s_version == version,", "[ \"amazonElasticContainerServiceConfig\", \"azureKubernetesServiceConfig\", \"googleKubernetesEngineConfig\", \"rancherKubernetesEngineConfig\" ] if \"rancherKubernetesEngineConfig\" in cluster:", "token) ns = create_ns(c_client, cluster, p, ns_name) return p, ns", "result.rstrip() print(\"cmd: \\t\" + cmd) print(\"result: \\t\" + result) assert", "assert error_message in workload.transitioningMessage def validate_workload(p_client, workload, type, ns_name, pod_count=1,", "http://\" + host_ip + path validate_http_response(cmd, 
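A hypothetical composition of the project and kubectl helpers above; it assumes a reachable Rancher server, a valid ADMIN_TOKEN, and an existing cluster object, and the function name is illustrative only.

def demo_project_setup(cluster):
    # Creates a project plus namespace, writes the cluster kubeconfig to
    # kube_fname, then runs kubectl against the new namespace.
    project, ns = create_project_and_ns(ADMIN_TOKEN, cluster,
                                        project_name="demo-project")
    create_kubeconfig(cluster)
    print(execute_kubectl_cmd("get pods -n " + ns.name))
    return project, ns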
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl

def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
                              state="error"):
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.transitioning != state:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl

def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    ingresses = client.list_ingress(uuid=ingress.uuid).data
    assert len(ingresses) == 1
    wl = ingresses[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        ingresses = client.list_ingress(uuid=ingress.uuid).data
        assert len(ingresses) == 1
        wl = ingresses[0]
    return wl

def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    pods = client.list_pod(uuid=pod.uuid).data
    assert len(pods) == 1
    p = pods[0]
    while p.state != "running":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to running")
        time.sleep(.5)
        pods = client.list_pod(uuid=pod.uuid).data
        assert len(pods) == 1
        p = pods[0]
    return p

def wait_for_pods_in_workload(p_client, workload, pod_count,
                              timeout=DEFAULT_TIMEOUT):
    start = time.time()
    pods = p_client.list_pod(workloadId=workload.id).data
    while len(pods) != pod_count:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for pods in workload")
        time.sleep(.5)
        pods = p_client.list_pod(workloadId=workload.id).data
    return pods

def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
                        timeout=DEFAULT_TIMEOUT):
    start = time.time()
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    for x in range(0, numofpods - 1):
        pod = pods["items"][x]
        podimage = pod["spec"]["containers"][0]["image"]
        while podimage != expectedimage:
            if time.time() - start > timeout:
                raise AssertionError(
                    "Timed out waiting for correct pod images")
            time.sleep(.5)
            pods = execute_kubectl_cmd(get_pods)
            pod = pods["items"][x]
            podimage = pod["spec"]["containers"][0]["image"]

def validate_workload(p_client, workload, type, ns_name, pod_count=1,
                      wait_for_cron_pods=60):
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    # For cronjob, wait for the first pod to get created after
    # scheduled wait time
    if type == "cronJob":
        time.sleep(wait_for_cron_pods)
    pods = p_client.list_pod(workloadId=workload.id).data
    assert len(pods) == pod_count
    for pod in pods:
        wait_for_pod_to_running(p_client, pod)
    wl_result = execute_kubectl_cmd(
        "get " + type + " " + workload.name + " -n " + ns_name)
    if type == "deployment" or type == "statefulSet":
        assert wl_result["status"]["readyReplicas"] == pod_count
    if type == "daemonSet":
        assert wl_result["status"]["currentNumberScheduled"] == pod_count
    if type == "cronJob":
        assert len(wl_result["status"]["active"]) >= pod_count
        return
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods_result = execute_kubectl_cmd(get_pods)
    assert len(pods_result["items"]) == pod_count
    for pod in pods_result["items"]:
        assert pod["status"]["phase"] == "Running"
    return pods_result["items"]

def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
                                     pod_count=1):
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    pods = wait_for_pods_in_workload(p_client, workload, pod_count)
    assert len(pods) == pod_count
    for pod in pods:
        wait_for_pod_to_running(p_client, pod)
    wl_result = execute_kubectl_cmd(
        "get " + type + " " + workload.name + " -n " + ns_name)
    assert wl_result["status"]["readyReplicas"] == pod_count
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods_result = execute_kubectl_cmd(get_pods)
    assert len(pods_result["items"]) == pod_count
    for pod in pods_result["items"]:
        assert pod["status"]["phase"] == "Running"
        assert len(pod["status"]["containerStatuses"]) == 2
        assert "running" in pod["status"]["containerStatuses"][0]["state"]
        assert "running" in pod["status"]["containerStatuses"][1]["state"]

def validate_workload_paused(p_client, workload, expectedstatus):
    workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
    assert workloadStatus == expectedstatus

def validate_pod_images(expectedimage, workload, ns_name):
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    for pod in pods["items"]:
        assert pod["spec"]["containers"][0]["image"] == expectedimage

def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    curpodnames = []
    for pod in pods["items"]:
        curpodnames.append(pod["metadata"]["name"])
    for expectedpod in expectedpods["items"]:
        assert expectedpod["metadata"]["name"] in curpodnames

def validate_workload_image(client, workload, expectedImage, ns):
    workload = client.list_workload(uuid=workload.uuid).data[0]
    assert workload.containers[0].image == expectedImage
    validate_pod_images(expectedImage, workload, ns.name)

def validate_psp_error_worklaod(p_client, workload, error_message):
    workload = wait_for_wl_transitioning(p_client, workload)
    assert workload.state == "updating"
    assert workload.transitioning == "error"
    print(workload.transitioningMessage)
    assert error_message in workload.transitioningMessage

def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
    assert len(list) == 1
    pv = list[0]
    while pv.state != "available":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to available")
        time.sleep(.5)
        list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
        assert len(list) == 1
        pv = list[0]
    return pv

def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
    assert len(list) == 1
    pvc = list[0]
    while pvc.state != "bound":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to bound")
        time.sleep(.5)
        list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
        assert len(list) == 1
        pvc = list[0]
    return pvc

def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
                       mount_path, sub_path, is_daemonSet=False):
    volumes = [{"type": "volume",
                "name": "vol1",
                "persistentVolumeClaim": {
                    "readOnly": "false",
                    "type": "persistentVolumeClaimVolumeSource",
                    "persistentVolumeClaimId": pvc_name
                }}]
    volumeMounts = [{"readOnly": "False",
                     "type": "volumeMount",
                     "mountPath": mount_path,
                     "subPath": sub_path,
                     "name": "vol1"
                     }]
    con = [{"name": "test1",
            "image": TEST_IMAGE,
            "volumeMounts": volumeMounts
            }]
    if is_daemonSet:
        workload = p_client.create_workload(name=wl_name,
                                            containers=con,
                                            namespaceId=ns_id,
                                            volumes=volumes,
                                            daemonSetConfig={})
    else:
        workload = p_client.create_workload(name=wl_name,
                                            containers=con,
                                            namespaceId=ns_id,
                                            volumes=volumes)
    return workload

def write_content_to_file(pod, content, filename):
    cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
    output = kubectl_pod_exec(pod, cmd_write)
    assert output.strip().decode('utf-8') == ""

def validate_file_content(pod, content, filename):
    cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
    output = kubectl_pod_exec(pod, cmd_get_content)
    assert output.strip().decode('utf-8') == content
def get_target_names(p_client, workloads):
    pods = []
    for workload in workloads:
        pod_list = p_client.list_pod(workloadId=workload.id).data
        pods.extend(pod_list)
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod.name)
    print("target name list:" + str(target_name_list))
    return target_name_list

def get_endpoint_url_for_workload(p_client, workload, timeout=600):
    fqdn_available = False
    url = ""
    start = time.time()
    while not fqdn_available:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        workload_list = p_client.list_workload(uuid=workload.uuid).data
        assert len(workload_list) == 1
        workload = workload_list[0]
        if hasattr(workload, 'publicEndpoints'):
            assert len(workload.publicEndpoints) > 0
            url = "http://"
            url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
            url = url + str(workload.publicEndpoints[0]["port"])
            fqdn_available = True
    return url

def wait_until_lb_is_active(url, timeout=300):
    start = time.time()
    while check_for_no_access(url):
        time.sleep(.5)
        print("No access yet")
        if time.time() - start > timeout:
            raise Exception('Timed out waiting for LB to become active')
    return

def check_for_no_access(url):
    try:
        requests.get(url)
        return False
    except requests.ConnectionError:
        print("Connection Error - " + url)
        return True

def validate_http_response(cmd, target_name_list, client_pod=None):
    target_hit_list = target_name_list[:]
    count = 5 * len(target_name_list)
    for i in range(1, count):
        if len(target_hit_list) == 0:
            break
        if client_pod is None:
            curl_cmd = "curl " + cmd
            result = run_command(curl_cmd)
        else:
            wget_cmd = "wget -qO- " + cmd
            result = kubectl_pod_exec(client_pod, wget_cmd)
            result = result.decode()
        result = result.rstrip()
        print("cmd: \t" + cmd)
        print("result: \t" + result)
        assert result in target_name_list
        if result in target_hit_list:
            target_hit_list.remove(result)
    print("After removing all, the rest is: ", target_hit_list)
    assert len(target_hit_list) == 0

def validate_ingress(p_client, cluster, workloads, host, path,
                     insecure_redirect=False):
    time.sleep(10)
    curl_args = " "
    if (insecure_redirect):
        curl_args = " -L --insecure "
    if len(host) > 0:
        curl_args += " --header 'Host: " + host + "'"
    nodes = get_schedulable_nodes(cluster)
    target_name_list = get_target_names(p_client, workloads)
    for node in nodes:
        host_ip = node.externalIpAddress
        cmd = curl_args + " http://" + host_ip + path
        validate_http_response(cmd, target_name_list)

def validate_ingress_using_endpoint(p_client, ingress, workloads,
                                    timeout=300):
    target_name_list = get_target_names(p_client, workloads)
    start = time.time()
    fqdn_available = False
    url = None
    while not fqdn_available:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
        assert len(ingress_list) == 1
        ingress = ingress_list[0]
        if hasattr(ingress, 'publicEndpoints'):
            for public_endpoint in ingress.publicEndpoints:
                if public_endpoint["hostname"].startswith(ingress.name):
                    fqdn_available = True
                    url = \
                        public_endpoint["protocol"].lower() + "://" + \
                        public_endpoint["hostname"]
                    if "path" in public_endpoint.keys():
                        url += public_endpoint["path"]
    time.sleep(10)
    validate_http_response(url, target_name_list)

def validate_hostPort(p_client, workload, source_port, cluster):
    pods = p_client.list_pod(workloadId=workload.id).data
    nodes = get_schedulable_nodes(cluster)
    for node in nodes:
        target_name_list = []
        for pod in pods:
            print(pod.nodeId + " check " + node.id)
            if pod.nodeId == node.id:
                target_name_list.append(pod.name)
                break
        host_ip = node.externalIpAddress
        curl_cmd = " http://" + host_ip + ":" + \
                   str(source_port) + "/name.html"
        validate_http_response(curl_cmd, target_name_list)

def validate_lb(p_client, workload):
    url = get_endpoint_url_for_workload(p_client, workload)
    target_name_list = get_target_names(p_client, [workload])
    wait_until_lb_is_active(url)
    validate_http_response(url + "/name.html", target_name_list)

def validate_nodePort(p_client, workload, cluster):
    source_port = workload.publicEndpoints[0]["port"]
    nodes = get_schedulable_nodes(cluster)
    pods = p_client.list_pod(workloadId=workload.id).data
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod.name)
    print("target name list:" + str(target_name_list))
    for node in nodes:
        host_ip = node.externalIpAddress
        curl_cmd = " http://" + host_ip + ":" + \
                   str(source_port) + "/name.html"
        validate_http_response(curl_cmd, target_name_list)

def validate_clusterIp(p_client, workload, cluster_ip, test_pods):
    pods = p_client.list_pod(workloadId=workload.id).data
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod["name"])
    curl_cmd = "http://" + cluster_ip + "/name.html"
    for pod in test_pods:
        validate_http_response(curl_cmd, target_name_list, pod)

def validate_dns_record(pod, record, expected):
    # requires pod with `dig` available - TEST_IMAGE
    host = '{0}.{1}.svc.cluster.local'.format(
        record["name"], record["namespaceId"])
    validate_dns_entry(pod, host, expected)

def validate_dns_entry(pod, host, expected):
    # requires pod with `dig` available - TEST_IMAGE
    cmd = 'ping -c 1 -W 1 {0}'.format(host)
    ping_output = kubectl_pod_exec(pod, cmd)
    ping_validation_pass = False
    for expected_value in expected:
        if expected_value in str(ping_output):
            ping_validation_pass = True
            break
    assert ping_validation_pass is True
    assert " 0% packet loss" in str(ping_output)
    dig_cmd = 'dig {0} +short'.format(host)
    dig_output = kubectl_pod_exec(pod, dig_cmd)
    for expected_value in expected:
        assert expected_value in str(dig_output)

def check_connectivity_between_workloads(p_client1, workload1, p_client2,
                                         workload2, allow_connectivity=True):
    wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
    wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
    for pod in wl1_pods:
        for o_pod in wl2_pods:
            check_connectivity_between_pods(pod, o_pod, allow_connectivity)

def check_connectivity_between_workload_pods(p_client, workload):
    pods = p_client.list_pod(workloadId=workload.id).data
    for pod in pods:
        for o_pod in pods:
            check_connectivity_between_pods(pod, o_pod)

def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
    pod_ip = pod2.status.podIp
    cmd = "ping -c 1 -W 1 " + pod_ip
    response = kubectl_pod_exec(pod1, cmd)
    print("Actual ping Response from " + pod1.name + ":" + str(response))
    if allow_connectivity:
        assert pod_ip in str(response) and " 0% packet loss" in str(response)
    else:
        assert pod_ip in str(response) and \
            " 100% packet loss" in str(response)
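How the endpoint validators compose, as a hypothetical sketch: the NodePort check hits every schedulable node on the published port, while the LB check waits for the single public URL and verifies round-robin coverage of every pod name. demo_validate_endpoints and its arguments are illustrative only.

def demo_validate_endpoints(p_client, np_workload, lb_workload, cluster):
    # NodePort: each worker node's external IP must answer on the port.
    validate_nodePort(p_client, np_workload, cluster)
    # L4 LB: wait for the endpoint, then confirm all pods get traffic.
    validate_lb(p_client, lb_workload)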
client.list_node(uuid=uuid).data node_count = len(nodes) def wait_for_cluster_node_count(client, cluster,", "p = client.create_project(name=random_name(), clusterId=cluster.id) p = wait_until_available(client, p) assert p.state", "= len(nodes) # Handle the case of nodes getting auto", "= client.list_workload(uuid=workload.uuid).data assert len(workloads) == 1 wl = workloads[0] while", "def delete_cluster(client, cluster): nodes = client.list_node(clusterId=cluster.id).data # Delete Cluster client.delete(cluster)", "= client.list_node(clusterId=cluster.id).data # Delete Cluster client.delete(cluster) # Delete nodes(in cluster)", "condition after {}' \\ ' seconds'.format(obj.type, obj.id, delta) raise Exception(msg)", "\"w\") file.write(env_details) file.close() def validate_hostPort(p_client, workload, source_port, cluster): pods =", "\" + host + \"'\" nodes = get_schedulable_nodes(cluster) target_name_list =", "def create_custom_host_registration_token(client, cluster): cluster_token = client.create_cluster_registration_token( clusterId=cluster.id) cluster_token = client.wait_success(cluster_token)", "random_test_name(\"default\") workload = p_client.create_workload(name=name, containers=con, namespaceId=ns.id, daemonSetConfig={}) validate_workload(p_client, workload, \"daemonSet\",", "pods = p_client.list_pod(workloadId=workload.id).data return pods def get_admin_client_and_cluster(): client = get_admin_client()", "ip_filter = {} ip_list = [] ip_filter['Name'] = \\ 'network-interface.addresses.association.public-ip'", "\"mountPath\": mount_path, \"subPath\": sub_path, \"name\": \"vol1\" }] con = [{\"name\":", "\"control\": node_list = control_nodes if role == \"worker\": node_list =", "project) assert project.state == 'active' return project def create_ns(client, cluster,", "= pods[0] while p.state != \"running\": if time.time() - start", "[] for pod in pods: print(pod.nodeId + \" check \"", "= client.list_node(uuid=uuid).data node_count = len(nodes) def wait_for_cluster_node_count(client, cluster, expected_node_count, timeout=300):", "pod in pods: print(pod.nodeId + \" check \" + node.id)", "p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data assert len(list) == 1 pvc = list[0] while pvc.state", "workload, timeout=DEFAULT_TIMEOUT): start = time.time() workloads = client.list_workload(uuid=workload.uuid).data assert len(workloads)", "str(dig_output) def wait_for_nodes_to_become_active(client, cluster, exception_list=[], retry_count=0): nodes = client.list_node(clusterId=cluster.id).data node_auto_deleted", "start = time.time() assert len(mcapps) == 1 mapp = mcapps[0]", "node in nodes: target_name_list = [] for pod in pods:", "== 1 wl = workloads[0] return wl def wait_for_pod_to_running(client, pod,", "re-evalauate new node list\") node_auto_deleted = True retry_count += 1", "podimage = pod[\"spec\"][\"containers\"][0][\"image\"] def wait_for_pods_in_workload(p_client, workload, pod_count, timeout=DEFAULT_TIMEOUT): start =", "= [] node_list = [] client = get_admin_client() nodes =", "def validate_clusterIp(p_client, workload, cluster_ip, test_pods): pods = p_client.list_pod(workloadId=workload.id).data target_name_list =", "get_cluster_client_for_token(cluster, token) ns = create_ns(c_client, cluster, p, ns_name) return p,", "workload, type, ns_name, pod_count=1, wait_for_cron_pods=60): workload = wait_for_wl_to_active(p_client, workload) assert", "def get_admin_client(): return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False) def 
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
    wait_for(lambda: client.reload(obj).state == state, timeout)
    return client.reload(obj)

def wait_for_condition(client, resource, check_function, fail_handler=None,
                       timeout=DEFAULT_TIMEOUT):
    start = time.time()
    resource = client.reload(resource)
    while not check_function(resource):
        if time.time() - start > timeout:
            exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
                ' to satisfy condition: ' + \
                inspect.getsource(check_function)
            if fail_handler:
                exceptionMsg = exceptionMsg + fail_handler(resource)
            raise Exception(exceptionMsg)
        time.sleep(.5)
        resource = client.reload(resource)
    return resource

def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
    start = time.time()
    ret = callback()
    while ret is None or ret is False:
        time.sleep(.5)
        if time.time() - start > timeout:
            if timeout_message:
                raise Exception(timeout_message)
            else:
                raise Exception('Timeout waiting for condition')
        ret = callback()
    return ret

def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    sleep = 0.01
    while True:
        time.sleep(sleep)
        # exponential backoff, capped at 2 seconds between polls
        sleep *= 2
        if sleep > 2:
            sleep = 2
        try:
            obj = client.reload(obj)
        except ApiError as e:
            # 403 means the object is not visible yet; keep polling
            if e.error.status != 403:
                raise e
        else:
            return obj
        delta = time.time() - start
        if delta > timeout:
            msg = 'Timeout waiting for [{}:{}] for condition after {}' \
                ' seconds'.format(obj.type, obj.id, delta)
            raise Exception(msg)
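# Usage sketch (not part of the original module): wait_for_condition() takes
# a predicate plus an optional fail handler whose return string is appended
# to the timeout message. A hypothetical wait for a node to finish
# provisioning:
def demo_wait_for_node_active(client, node):
    return wait_for_condition(
        client, node,
        lambda n: n.state == "active",
        lambda n: 'State is: ' + n.state,
        timeout=300)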
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
    client = get_client_for_token(token)
    p = create_project(client, cluster, project_name)
    c_client = get_cluster_client_for_token(cluster, token)
    ns = create_ns(c_client, cluster, p, ns_name)
    return p, ns

def create_project(client, cluster, project_name=None):
    if project_name is None:
        project_name = random_name()
    p = client.create_project(name=project_name, clusterId=cluster.id)
    time.sleep(5)
    p = wait_until_available(client, p)
    assert p.state == 'active'
    return p

def create_project_with_pspt(client, cluster, pspt):
    p = client.create_project(name=random_name(), clusterId=cluster.id)
    p = wait_until_available(client, p)
    assert p.state == 'active'
    return set_pspt_for_project(p, client, pspt)

def set_pspt_for_project(project, client, pspt):
    project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
    project = wait_until_available(client, project)
    assert project.state == 'active'
    return project

def create_ns(client, cluster, project, ns_name=None):
    if ns_name is None:
        ns_name = random_name()
    ns = client.create_namespace(name=ns_name,
                                 clusterId=cluster.id,
                                 projectId=project.id)
    wait_for_ns_to_become_active(client, ns)
    ns = client.reload(ns)
    assert ns.state == 'active'
    return ns

def assign_members_to_cluster(client, user, cluster, role_template_id):
    crtb = client.create_cluster_role_template_binding(
        clusterId=cluster.id,
        roleTemplateId=role_template_id,
        subjectKind="User",
        userId=user.id)
    return crtb

def assign_members_to_project(client, user, project, role_template_id):
    prtb = client.create_project_role_template_binding(
        projectId=project.id,
        roleTemplateId=role_template_id,
        subjectKind="User",
        userId=user.id)
    return prtb

def change_member_role_in_cluster(client, user, crtb, role_template_id):
    crtb = client.update(
        crtb, roleTemplateId=role_template_id, userId=user.id)
    return crtb

def change_member_role_in_project(client, user, prtb, role_template_id):
    prtb = client.update(
        prtb, roleTemplateId=role_template_id, userId=user.id)
    return prtb
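# Usage sketch (not part of the original module): standard test setup built
# from the helpers above - one project, one namespace, then a member
# binding. "project-member" is one of Rancher's built-in role template ids;
# the user object is assumed to already exist.
def demo_project_setup(cluster, user):
    client = get_admin_client()
    project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
    assign_members_to_project(client, user, project, "project-member")
    return project, ns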
def create_kubeconfig(cluster):
    generateKubeConfigOutput = cluster.generateKubeconfig()
    print(generateKubeConfigOutput.config)
    file = open(kube_fname, "w")
    file.write(generateKubeConfigOutput.config)
    file.close()

def execute_kubectl_cmd(cmd, json_out=True, stderr=False):
    command = 'kubectl --kubeconfig {0} {1}'.format(kube_fname, cmd)
    if json_out:
        command += ' -o json'
    if stderr:
        result = run_command_with_stderr(command)
    else:
        result = run_command(command)
    if json_out:
        result = json.loads(result)
    print(result)
    return result

def run_command(command):
    return subprocess.check_output(command, shell=True, text=True)

def run_command_with_stderr(command):
    try:
        output = subprocess.check_output(command, shell=True,
                                         stderr=subprocess.PIPE)
        returncode = 0
    except subprocess.CalledProcessError as e:
        output = e.output
        returncode = e.returncode
    print(returncode)
    # returncode is logged above; callers consume only the raw bytes output
    return output

def kubectl_pod_exec(pod, cmd):
    command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
    return execute_kubectl_cmd(command, json_out=False, stderr=True)

def exec_shell_command(ip, port, cmd, password):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(ip, username="root", password=password, port=port)
    stdin, stdout, stderr = ssh.exec_command(cmd)
    response = stdout.readlines()
    return response
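# Usage sketch (not part of the original module): execute_kubectl_cmd()
# shells out to kubectl with the kubeconfig written by create_kubeconfig(),
# appending "-o json" unless json_out=False. Note that run_command() passes
# text=True to subprocess.check_output, which requires Python 3.7+. A
# hypothetical call listing pod names in a namespace:
def demo_list_pods(cluster, ns_name):
    create_kubeconfig(cluster)
    result = execute_kubectl_cmd("get pods -n " + ns_name)
    return [item["metadata"]["name"] for item in result["items"]]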
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl

def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
                              state="error"):
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.transitioning != state:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for workload to start transitioning")
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl

def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    ingresses = client.list_ingress(uuid=ingress.uuid).data
    assert len(ingresses) == 1
    wl = ingresses[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        ingresses = client.list_ingress(uuid=ingress.uuid).data
        assert len(ingresses) == 1
        wl = ingresses[0]
    return wl

def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    pods = client.list_pod(uuid=pod.uuid).data
    assert len(pods) == 1
    p = pods[0]
    while p.state != "running":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to running")
        time.sleep(.5)
        pods = client.list_pod(uuid=pod.uuid).data
        assert len(pods) == 1
        p = pods[0]
    return p

def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    nss = client.list_namespace(uuid=ns.uuid).data
    assert len(nss) == 1
    ns = nss[0]
    while ns.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        nss = client.list_namespace(uuid=ns.uuid).data
        assert len(nss) == 1
        ns = nss[0]
    return ns

def wait_for_pods_in_workload(p_client, workload, pod_count,
                              timeout=DEFAULT_TIMEOUT):
    start = time.time()
    pods = p_client.list_pod(workloadId=workload.id).data
    while len(pods) != pod_count:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for pods in workload")
        time.sleep(.5)
        pods = p_client.list_pod(workloadId=workload.id).data
    return pods

def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
                        timeout=DEFAULT_TIMEOUT):
    start = time.time()
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    for x in range(0, numofpods - 1):
        pod = pods["items"][x]
        podimage = pod["spec"]["containers"][0]["image"]
        while podimage != expectedimage:
            if time.time() - start > timeout:
                raise AssertionError(
                    "Timed out waiting for correct pod images")
            time.sleep(.5)
            pods = execute_kubectl_cmd(get_pods)
            pod = pods["items"][x]
            podimage = pod["spec"]["containers"][0]["image"]

def get_pods(p_client, workload):
    pods = p_client.list_pod(workloadId=workload.id).data
    return pods
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
                      wait_for_cron_pods=60):
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    # For cronjob, wait for the first pod to get created after
    # the scheduled wait time
    if type == "cronJob":
        time.sleep(wait_for_cron_pods)
    pods = p_client.list_pod(workloadId=workload.id).data
    assert len(pods) == pod_count
    for pod in pods:
        wait_for_pod_to_running(p_client, pod)
    wl_result = execute_kubectl_cmd(
        "get " + type + " " + workload.name + " -n " + ns_name)
    if type == "deployment" or type == "statefulSet":
        assert wl_result["status"]["readyReplicas"] == pod_count
    if type == "daemonSet":
        assert wl_result["status"]["currentNumberScheduled"] == pod_count
    if type == "cronJob":
        assert len(wl_result["status"]["active"]) >= pod_count
        return
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods_result = execute_kubectl_cmd(get_pods)
    assert len(pods_result["items"]) == pod_count
    for pod in pods_result["items"]:
        assert pod["status"]["phase"] == "Running"
    return pods_result["items"]

def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
                                     pod_count=1):
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    pods = wait_for_pods_in_workload(p_client, workload, pod_count)
    assert len(pods) == pod_count
    for pod in pods:
        wait_for_pod_to_running(p_client, pod)
    wl_result = execute_kubectl_cmd(
        "get " + type + " " + workload.name + " -n " + ns_name)
    assert wl_result["status"]["readyReplicas"] == pod_count
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    execute_kubectl_cmd(get_pods)
    pods_result = execute_kubectl_cmd(get_pods)
    assert len(pods_result["items"]) == pod_count
    for pod in pods_result["items"]:
        assert pod["status"]["phase"] == "Running"
        # main container plus one sidekick, both running
        assert len(pod["status"]["containerStatuses"]) == 2
        assert "running" in pod["status"]["containerStatuses"][0]["state"]
        assert "running" in pod["status"]["containerStatuses"][1]["state"]

def validate_workload_paused(p_client, workload, expectedstatus):
    workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
    assert workloadStatus == expectedstatus

def validate_pod_images(expectedimage, workload, ns_name):
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    for pod in pods["items"]:
        assert pod["spec"]["containers"][0]["image"] == expectedimage

def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    curpodnames = []
    for pod in pods["items"]:
        curpodnames.append(pod["metadata"]["name"])
    for expectedpod in expectedpods["items"]:
        assert expectedpod["metadata"]["name"] in curpodnames

def validate_workload_image(client, workload, expectedImage, ns):
    workload = client.list_workload(uuid=workload.uuid).data[0]
    assert workload.containers[0].image == expectedImage
    validate_pod_images(expectedImage, workload, ns.name)

def validate_psp_error_worklaod(p_client, workload, error_message):
    # function name keeps the original spelling used by the callers
    workload = wait_for_wl_transitioning(p_client, workload)
    assert workload.state == "updating"
    assert workload.transitioning == "error"
    print(workload.transitioningMessage)
    assert error_message in workload.transitioningMessage
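# Usage sketch (not part of the original module): end-to-end flow with the
# validators above - create a two-replica deployment and assert that it
# converges. The workload name is generated; nothing else is assumed beyond
# an active project client and namespace.
def demo_deployment(p_client, ns):
    con = [{"name": "test1", "image": TEST_IMAGE}]
    workload = p_client.create_workload(name=random_test_name("demo"),
                                        containers=con,
                                        namespaceId=ns.id,
                                        scale=2)
    validate_workload(p_client, workload, "deployment", ns.name, pod_count=2)
    return workload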
def get_schedulable_nodes(cluster):
    client = get_admin_client()
    nodes = client.list_node(clusterId=cluster.id).data
    schedulable_nodes = []
    for node in nodes:
        if node.worker:
            schedulable_nodes.append(node)
    return schedulable_nodes

def get_role_nodes(cluster, role):
    etcd_nodes = []
    control_nodes = []
    worker_nodes = []
    node_list = []
    client = get_admin_client()
    nodes = client.list_node(clusterId=cluster.id).data
    for node in nodes:
        if node.etcd:
            etcd_nodes.append(node)
        if node.controlPlane:
            control_nodes.append(node)
        if node.worker:
            worker_nodes.append(node)
    if role == "etcd":
        node_list = etcd_nodes
    if role == "control":
        node_list = control_nodes
    if role == "worker":
        node_list = worker_nodes
    return node_list

def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
                                    retry_count=0):
    nodes = client.list_node(clusterId=cluster.id).data
    node_auto_deleted = False
    for node in nodes:
        if node.requestedHostname not in exception_list:
            node = wait_for_node_status(client, node, "active")
            if node is None:
                print("Need to re-evaluate new node list")
                node_auto_deleted = True
                retry_count += 1
                print("Retry Count:" + str(retry_count))
    if node_auto_deleted and retry_count < 5:
        wait_for_nodes_to_become_active(client, cluster, exception_list,
                                        retry_count)

def wait_for_node_status(client, node, state):
    uuid = node.uuid
    start = time.time()
    nodes = client.list_node(uuid=uuid).data
    node_count = len(nodes)
    # Handle the case of nodes getting auto deleted when they are
    # part of nodepools
    if node_count == 1:
        node_status = nodes[0].state
    else:
        print("Node does not exist anymore -" + uuid)
        return None
    while node_status != state:
        if time.time() - start > MACHINE_TIMEOUT:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(5)
        nodes = client.list_node(uuid=uuid).data
        node_count = len(nodes)
        if node_count == 1:
            node_status = nodes[0].state
        else:
            print("Node does not exist anymore -" + uuid)
            return None
    return node

def wait_for_node_to_be_deleted(client, node, timeout=300):
    uuid = node.uuid
    start = time.time()
    nodes = client.list_node(uuid=uuid).data
    node_count = len(nodes)
    while node_count != 0:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for node to get deleted")
        time.sleep(.5)
        nodes = client.list_node(uuid=uuid).data
        node_count = len(nodes)

def wait_for_cluster_node_count(client, cluster, expected_node_count,
                                timeout=300):
    start = time.time()
    nodes = client.list_node(clusterId=cluster.id).data
    node_count = len(nodes)
    while node_count != expected_node_count:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for expected node count")
        time.sleep(.5)
        nodes = client.list_node(clusterId=cluster.id).data
        node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
    allowed_roles = ["etcd", "worker", "controlplane"]
    cluster_tokens = client.list_cluster_registration_token(
        clusterId=cluster.id).data
    if len(cluster_tokens) > 0:
        cluster_token = cluster_tokens[0]
    else:
        cluster_token = create_custom_host_registration_token(client, cluster)
    cmd = cluster_token.nodeCommand
    for role in roles:
        assert role in allowed_roles
        cmd += " --" + role
    additional_options = " --address " + node.public_ip_address + \
                         " --internal-address " + node.private_ip_address
    cmd += additional_options
    return cmd

def create_custom_host_registration_token(client, cluster):
    cluster_token = client.create_cluster_registration_token(
        clusterId=cluster.id)
    cluster_token = client.wait_success(cluster_token)
    assert cluster_token.state == 'active'
    return cluster_token

def get_cluster_type(client, cluster):
    cluster_configs = [
        "amazonElasticContainerServiceConfig",
        "azureKubernetesServiceConfig",
        "googleKubernetesEngineConfig",
        "rancherKubernetesEngineConfig"
    ]
    if "rancherKubernetesEngineConfig" in cluster:
        nodes = client.list_node(clusterId=cluster.id).data
        if len(nodes) > 0:
            if nodes[0].nodeTemplateId is None:
                return "Custom"
    for cluster_config in cluster_configs:
        if cluster_config in cluster:
            return cluster_config
    return "Imported"

def delete_cluster(client, cluster):
    nodes = client.list_node(clusterId=cluster.id).data
    # Delete Cluster
    client.delete(cluster)
    # Delete nodes(in cluster) from AWS for Imported and Custom Cluster
    if (len(nodes) > 0):
        cluster_type = get_cluster_type(client, cluster)
        print(cluster_type)
        if cluster_type in ["Imported", "Custom"]:
            # nodes were captured before the cluster delete above
            filters = [
                {'Name': 'tag:Name',
                 'Values': ['testcustom*', 'teststess*']}]
            ip_filter = {}
            ip_list = []
            ip_filter['Name'] = \
                'network-interface.addresses.association.public-ip'
            ip_filter['Values'] = ip_list
            filters.append(ip_filter)
            for node in nodes:
                ip_list.append(node.externalIpAddress)
            assert len(ip_filter) > 0
            print(ip_filter)
            aws_nodes = AmazonWebServices().get_nodes(filters)
            for node in aws_nodes:
                print(node.public_ip_address)
            AmazonWebServices().delete_nodes(aws_nodes)

def delete_node(aws_nodes):
    for node in aws_nodes:
        AmazonWebServices().delete_node(node)

def cluster_cleanup(client, cluster, aws_nodes=None):
    if RANCHER_CLEANUP_CLUSTER:
        client.delete(cluster)
        if aws_nodes is not None:
            delete_node(aws_nodes)
    else:
        env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
        env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
        env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
        create_config_file(env_details)

def create_config_file(env_details):
    file = open(env_file, "w")
    file.write(env_details)
    file.close()
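# Usage sketch (not part of the original module): registering a custom host
# by building the docker run command for the desired roles and executing it
# on the target node. The aws_node object is assumed to expose
# execute_command() as in lib.aws; names here are illustrative.
def demo_register_custom_host(client, cluster, aws_node):
    cmd = get_custom_host_registration_cmd(
        client, cluster, ["worker"], aws_node)
    aws_node.execute_command(cmd)
    wait_for_cluster_node_count(client, cluster, expected_node_count=1)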
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
                                         workload2, allow_connectivity=True):
    wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
    wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
    for pod in wl1_pods:
        for o_pod in wl2_pods:
            check_connectivity_between_pods(pod, o_pod, allow_connectivity)

def check_connectivity_between_workload_pods(p_client, workload):
    pods = p_client.list_pod(workloadId=workload.id).data
    for pod in pods:
        for o_pod in pods:
            check_connectivity_between_pods(pod, o_pod)

def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
    pod_ip = pod2.status.podIp
    cmd = "ping -c 1 -W 1 " + pod_ip
    response = kubectl_pod_exec(pod1, cmd)
    print("Actual ping Response from " + pod1.name + ":" + str(response))
    if allow_connectivity:
        assert pod_ip in str(response) and \
            " 0% packet loss" in str(response)
    else:
        assert pod_ip in str(response) and \
            " 100% packet loss" in str(response)

def validate_dns_record(pod, record, expected):
    # requires pod with `dig` available - TEST_IMAGE
    host = '{0}.{1}.svc.cluster.local'.format(
        record["name"], record["namespaceId"])
    validate_dns_entry(pod, host, expected)

def validate_dns_entry(pod, host, expected):
    # requires pod with `dig` available - TEST_IMAGE
    cmd = 'ping -c 1 -W 1 {0}'.format(host)
    ping_output = kubectl_pod_exec(pod, cmd)
    ping_validation_pass = False
    for expected_value in expected:
        if expected_value in str(ping_output):
            ping_validation_pass = True
            break
    assert ping_validation_pass is True
    assert " 0% packet loss" in str(ping_output)
    dig_cmd = 'dig {0} +short'.format(host)
    dig_output = kubectl_pod_exec(pod, dig_cmd)
    for expected_value in expected:
        assert expected_value in str(dig_output)
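# Usage sketch (not part of the original module): validate_dns_record()
# resolves "<name>.<namespace>.svc.cluster.local" from inside a pod, so the
# pod's image must ship both ping and dig (TEST_IMAGE does). A hypothetical
# check that a DNS record answers with a known cluster IP:
def demo_dns_check(test_pod, record, cluster_ip):
    validate_dns_record(test_pod, record, expected=[cluster_ip])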
def validate_ingress(p_client, cluster, workloads, host, path,
                     insecure_redirect=False):
    time.sleep(10)
    curl_args = " "
    if (insecure_redirect):
        curl_args = " -L --insecure "
    if len(host) > 0:
        curl_args += " --header 'Host: " + host + "'"
    nodes = get_schedulable_nodes(cluster)
    target_name_list = get_target_names(p_client, workloads)
    for node in nodes:
        host_ip = node.externalIpAddress
        cmd = curl_args + " http://" + host_ip + path
        validate_http_response(cmd, target_name_list)

def validate_ingress_using_endpoint(p_client, ingress, workloads,
                                    timeout=300):
    target_name_list = get_target_names(p_client, workloads)
    start = time.time()
    fqdn_available = False
    url = None
    while not fqdn_available:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
        assert len(ingress_list) == 1
        ingress = ingress_list[0]
        if hasattr(ingress, 'publicEndpoints'):
            for public_endpoint in ingress.publicEndpoints:
                if public_endpoint["hostname"].startswith(ingress.name):
                    fqdn_available = True
                    url = \
                        public_endpoint["protocol"].lower() + "://" + \
                        public_endpoint["hostname"]
                    if "path" in public_endpoint.keys():
                        url += public_endpoint["path"]
    time.sleep(10)
    validate_http_response(url, target_name_list)

def get_target_names(p_client, workloads):
    pods = []
    for workload in workloads:
        pod_list = p_client.list_pod(workloadId=workload.id).data
        pods.extend(pod_list)
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod.name)
    print("target name list:" + str(target_name_list))
    return target_name_list

def get_endpoint_url_for_workload(p_client, workload, timeout=600):
    fqdn_available = False
    url = ""
    start = time.time()
    while not fqdn_available:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        workload_list = p_client.list_workload(uuid=workload.uuid).data
        assert len(workload_list) == 1
        workload = workload_list[0]
        if hasattr(workload, 'publicEndpoints'):
            assert len(workload.publicEndpoints) > 0
            url = "http://"
            url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
            url = url + str(workload.publicEndpoints[0]["port"])
            fqdn_available = True
    return url

def wait_until_lb_is_active(url, timeout=300):
    start = time.time()
    while check_for_no_access(url):
        time.sleep(.5)
        print("No access yet")
        if time.time() - start > timeout:
            raise Exception('Timed out waiting for LB to become active')
    return

def check_for_no_access(url):
    try:
        requests.get(url)
        return False
    except requests.ConnectionError:
        print("Connection Error - " + url)
        return True

def validate_http_response(cmd, target_name_list, client_pod=None):
    target_hit_list = target_name_list[:]
    count = 5 * len(target_name_list)
    for i in range(1, count):
        if len(target_hit_list) == 0:
            break
        if client_pod is None:
            curl_cmd = "curl " + cmd
            result = run_command(curl_cmd)
        else:
            wget_cmd = "wget -qO- " + cmd
            result = kubectl_pod_exec(client_pod, wget_cmd)
            result = result.decode()
        result = result.rstrip()
        print("cmd: \t" + cmd)
        print("result: \t" + result)
        assert result in target_name_list
        if result in target_hit_list:
            target_hit_list.remove(result)
    print("After removing all, the rest is: ", target_hit_list)
    assert len(target_hit_list) == 0
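# Usage sketch (not part of the original module): ingress round-trip with
# the helpers above - the rule routes the test host to the workload's port
# 80, then every schedulable node is curled with a Host header until each
# backing pod has answered at least once.
def demo_ingress(p_client, cluster, ns, workload):
    host = "test" + str(random_int(10000, 99999)) + ".com"
    rule = {"host": host,
            "paths": [{"workloadIds": [workload.id], "targetPort": "80"}]}
    ingress = p_client.create_ingress(name=random_test_name("ing"),
                                      namespaceId=ns.id, rules=[rule])
    wait_for_ingress_to_active(p_client, ingress)
    validate_ingress(p_client, cluster, [workload], host, "/name.html")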
def validate_hostPort(p_client, workload, source_port, cluster):
    pods = p_client.list_pod(workloadId=workload.id).data
    nodes = get_schedulable_nodes(cluster)
    for node in nodes:
        target_name_list = []
        for pod in pods:
            print(pod.nodeId + " check " + node.id)
            # the pod scheduled on this node is the only expected responder
            if pod.nodeId == node.id:
                target_name_list.append(pod.name)
                break
        host_ip = node.externalIpAddress
        curl_cmd = " http://" + host_ip + ":" + \
                   str(source_port) + "/name.html"
        validate_http_response(curl_cmd, target_name_list)

def validate_lb(p_client, workload):
    url = get_endpoint_url_for_workload(p_client, workload)
    target_name_list = get_target_names(p_client, [workload])
    wait_until_lb_is_active(url)
    validate_http_response(url + "/name.html", target_name_list)

def validate_nodePort(p_client, workload, cluster):
    source_port = workload.publicEndpoints[0]["port"]
    nodes = get_schedulable_nodes(cluster)
    pods = p_client.list_pod(workloadId=workload.id).data
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod.name)
    print("target name list:" + str(target_name_list))
    for node in nodes:
        host_ip = node.externalIpAddress
        curl_cmd = " http://" + host_ip + ":" + \
                   str(source_port) + "/name.html"
        validate_http_response(curl_cmd, target_name_list)

def validate_clusterIp(p_client, workload, cluster_ip, test_pods):
    pods = p_client.list_pod(workloadId=workload.id).data
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod["name"])
    curl_cmd = "http://" + cluster_ip + "/name.html"
    for pod in test_pods:
        validate_http_response(curl_cmd, target_name_list, pod)

def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
    assert len(list) == 1
    pv = list[0]
    while pv.state != "available":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to available")
        time.sleep(.5)
        list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
        assert len(list) == 1
        pv = list[0]
    return pv

def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
    assert len(list) == 1
    pvc = list[0]
    while pvc.state != "bound":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to bound")
        time.sleep(.5)
        list = p_client.list_persistent_volume_claim(
            uuid=pvc_object.uuid).data
        assert len(list) == 1
        pvc = list[0]
    return pvc

def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
                       mount_path, sub_path, is_daemonSet=False):
    volumes = [{"type": "volume",
                "name": "vol1",
                "persistentVolumeClaim": {
                    "readOnly": "false",
                    "type": "persistentVolumeClaimVolumeSource",
                    "persistentVolumeClaimId": pvc_name
                }}]
    volumeMounts = [{"readOnly": "False",
                     "type": "volumeMount",
                     "mountPath": mount_path,
                     "subPath": sub_path,
                     "name": "vol1"
                     }]
    con = [{"name": "test1",
            "image": TEST_IMAGE,
            "volumeMounts": volumeMounts
            }]
    if is_daemonSet:
        workload = p_client.create_workload(name=wl_name,
                                            containers=con,
                                            namespaceId=ns_id,
                                            volumes=volumes,
                                            daemonSetConfig={})
    else:
        workload = p_client.create_workload(name=wl_name,
                                            containers=con,
                                            namespaceId=ns_id,
                                            volumes=volumes)
    return workload

def write_content_to_file(pod, content, filename):
    cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
    output = kubectl_pod_exec(pod, cmd_write)
    assert output.strip().decode('utf-8') == ""

def validate_file_content(pod, content, filename):
    cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
    output = kubectl_pod_exec(pod, cmd_get_content)
    assert output.strip().decode('utf-8') == content
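# Usage sketch (not part of the original module): NFS-backed workload - bind
# a PVC, mount it, then prove the mount works by writing and reading a file
# through kubectl exec. The pvc object and path names are illustrative.
def demo_nfs_workload(p_client, ns, pvc):
    wait_for_pvc_to_be_bound(p_client, pvc)
    workload = create_wl_with_nfs(p_client, ns.id, pvc.name,
                                  random_test_name("nfs"),
                                  mount_path="/var/nfs", sub_path="test")
    wait_for_wl_to_active(p_client, workload)
    pod = wait_for_pod_to_running(p_client, get_pods(p_client, workload)[0])
    write_content_to_file(pod, "hello", "/var/nfs/test.txt")
    validate_file_content(pod, "hello", "/var/nfs/test.txt")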
def validate_cluster(client, cluster, intermediate_state="provisioning",
                     check_intermediate_state=True, skipIngresscheck=True,
                     nodes_not_in_active_state=[], k8s_version=""):
    cluster = validate_cluster_state(
        client, cluster,
        check_intermediate_state=check_intermediate_state,
        intermediate_state=intermediate_state,
        nodes_not_in_active_state=nodes_not_in_active_state)
    # Create a daemonset workload and an ingress with a workload rule
    # pointing to this daemonset
    create_kubeconfig(cluster)
    if k8s_version != "":
        check_cluster_version(cluster, k8s_version)
    if hasattr(cluster, 'rancherKubernetesEngineConfig'):
        check_cluster_state(len(get_role_nodes(cluster, "etcd")))
    project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
    p_client = get_project_client_for_token(project, ADMIN_TOKEN)
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    name = random_test_name("default")
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns.id,
                                        daemonSetConfig={})
    validate_workload(p_client, workload, "daemonSet", ns.name,
                      len(get_schedulable_nodes(cluster)))
    if not skipIngresscheck:
        host = "test" + str(random_int(10000, 99999)) + ".com"
        path = "/name.html"
        rule = {"host": host,
                "paths": [{"workloadIds": [workload.id],
                           "targetPort": "80"}]}
        ingress = p_client.create_ingress(name=name,
                                          namespaceId=ns.id,
                                          rules=[rule])
        wait_for_ingress_to_active(p_client, ingress)
        validate_ingress(p_client, cluster, [workload], host, path)
    return cluster

def validate_cluster_state(client, cluster,
                           check_intermediate_state=True,
                           intermediate_state="provisioning",
                           nodes_not_in_active_state=[]):
    if check_intermediate_state:
        cluster = wait_for_condition(
            client, cluster,
            lambda x: x.state == intermediate_state,
            lambda x: 'State is: ' + x.state,
            timeout=MACHINE_TIMEOUT)
        assert cluster.state == intermediate_state
    cluster = wait_for_condition(
        client, cluster,
        lambda x: x.state == "active",
        lambda x: 'State is: ' + x.state,
        timeout=MACHINE_TIMEOUT)
    assert cluster.state == "active"
    wait_for_nodes_to_become_active(
        client, cluster, exception_list=nodes_not_in_active_state)
    return cluster

def check_cluster_version(cluster, version):
    cluster_k8s_version = \
        cluster.appliedSpec["rancherKubernetesEngineConfig"][
            "kubernetesVersion"]
    assert cluster_k8s_version == version, \
        "cluster_k8s_version: " + cluster_k8s_version + \
        " Expected: " + version
    expected_k8s_version = version[:version.find("-")]
    k8s_version = execute_kubectl_cmd("version")
    kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
    assert kubectl_k8s_version == expected_k8s_version, \
        "kubectl version: " + kubectl_k8s_version + \
        " Expected: " + expected_k8s_version

def check_cluster_state(etcd_count):
    css_resp = execute_kubectl_cmd("get cs")
    css = css_resp["items"]
    components = ["scheduler", "controller-manager"]
    for i in range(0, etcd_count):
        components.append("etcd-" + str(i))
    print("components to check - " + str(components))
    for cs in css:
        component_name = cs["metadata"]["name"]
        assert component_name in components
        components.remove(component_name)
        assert cs["conditions"][0]["status"] == "True"
        assert cs["conditions"][0]["type"] == "Healthy"
    assert len(components) == 0

def get_admin_client_and_cluster():
    client = get_admin_client()
    if CLUSTER_NAME == "":
        clusters = client.list_cluster().data
    else:
        clusters = client.list_cluster(name=CLUSTER_NAME).data
    assert len(clusters) > 0
    cluster = clusters[0]
    return client, cluster
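# Usage sketch (not part of the original module): full provisioning check as
# a test would call it - wait through the intermediate state, then let
# validate_cluster() deploy its canary daemonset and the ingress probe.
def demo_validate_new_cluster(client, cluster):
    return validate_cluster(client, cluster,
                            intermediate_state="provisioning",
                            check_intermediate_state=True,
                            skipIngresscheck=False)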
\"rancher_env.config\") CLUSTER_NAME_2 = \"\" def random_str(): return 'random-{0}-{1}'.format(random_num(), int(time.time()))", "state to get to active\") time.sleep(.5) pods = p_client.list_pod(workloadId=workload.id).data return", "in workload.workloadLabels.items(): label = key + \"=\" + value get_pods", "if time.time() - start > timeout: raise AssertionError( \"Timed out", "validate_file_content(pod, content, filename): cmd_get_content = \"/bin/bash -c 'cat {0}' \".format(filename)", "while podimage != expectedimage: if time.time() - start > timeout:", "!= \"available\": if time.time() - start > timeout: raise AssertionError(", "len(list) == 1 pv = list[0] return pv def wait_for_pvc_to_be_bound(p_client,", "False: time.sleep(.5) if time.time() - start > timeout: if timeout_message:", "return pvc def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name, mount_path, sub_path, is_daemonSet=False):", "wait_for(lambda: client.reload(obj).state == state, timeout) return client.reload(obj) def wait_for_condition(client, resource,", "\"cronJob\": time.sleep(wait_for_cron_pods) pods = p_client.list_pod(workloadId=workload.id).data assert len(pods) == pod_count for", "nodes(in cluster) from AWS for Imported and Custom Cluster if", "file = open(env_file, \"w\") file.write(env_details) file.close() def validate_hostPort(p_client, workload, source_port,", "to get to bound\") time.sleep(.5) list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data assert len(list)", "node in nodes: host_ip = node.externalIpAddress curl_cmd = \" http://\"", "def validate_lb(p_client, workload): url = get_endpoint_url_for_workload(p_client, workload) target_name_list = get_target_names(p_client,", "cmd = 'ping -c 1 -W 1 {0}'.format(host) ping_output =", "= True return url def wait_until_lb_is_active(url, timeout=300): start = time.time()", "= True break assert ping_validation_pass is True assert \" 0%", "exist anymore -\" + uuid) return None return node def", "+ \"/name.html\" validate_http_response(curl_cmd, target_name_list) def validate_clusterIp(p_client, workload, cluster_ip, test_pods): pods", "MACHINE_TIMEOUT: raise AssertionError( \"Timed out waiting for state to get", "for workload in workloads: pod_list = p_client.list_pod(workloadId=workload.id).data pods.extend(pod_list) target_name_list =", "= url + str(workload.publicEndpoints[0][\"port\"]) fqdn_available = True return url def", "time.time() ingresses = client.list_ingress(uuid=ingress.uuid).data assert len(ingresses) == 1 wl =", "rule pointing to this daemonset create_kubeconfig(cluster) if k8s_version != \"\":", "client, cluster def validate_cluster_state(client, cluster, check_intermediate_state=True, intermediate_state=\"provisioning\", nodes_not_in_active_state=[]): if check_intermediate_state:", "active') return def check_for_no_access(url): try: requests.get(url) return False except requests.ConnectionError:", "to this daemonset create_kubeconfig(cluster) if k8s_version != \"\": check_cluster_version(cluster, k8s_version)", "\"googleKubernetesEngineConfig\", \"rancherKubernetesEngineConfig\" ] if \"rancherKubernetesEngineConfig\" in cluster: nodes = client.list_node(clusterId=cluster.id).data", "None: project_name = random_name() p = client.create_project(name=project_name, clusterId=cluster.id) time.sleep(5) p", "ns = create_ns(c_client, cluster, p, ns_name) return p, ns def", "control_nodes = [] worker_nodes = [] node_list = [] client", "ip_list = [] ip_filter['Name'] = \\ 'network-interface.addresses.association.public-ip' 
ip_filter['Values'] = ip_list", "else: env_details = \"env.CATTLE_TEST_URL='\" + CATTLE_TEST_URL + \"'\\n\" env_details +=", "version expected_k8s_version = version[:version.find(\"-\")] k8s_version = execute_kubectl_cmd(\"version\") kubectl_k8s_version = k8s_version[\"serverVersion\"][\"gitVersion\"]", "node_list = worker_nodes return node_list def validate_ingress(p_client, cluster, workloads, host,", "\" -n \" + ns_name) assert wl_result[\"status\"][\"readyReplicas\"] == pod_count for", "pod_count return for key, value in workload.workloadLabels.items(): label = key", "\" -n \" + ns_name) if type == \"deployment\" or", "nodes: if node.requestedHostname not in exception_list: node = wait_for_node_status(client, node,", "components components.remove(component_name) assert cs[\"conditions\"][0][\"status\"] == \"True\" assert cs[\"conditions\"][0][\"type\"] == \"Healthy\"", "check_function(resource): if time.time() - start > timeout: exceptionMsg = 'Timeout", "pod_count: if time.time() - start > timeout: raise AssertionError( \"Timed", "ns def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods, timeout=DEFAULT_TIMEOUT): start =", "def validate_pod_images(expectedimage, workload, ns_name): for key, value in workload.workloadLabels.items(): label", "p_client.list_pod(workloadId=workload.id).data return pods def get_admin_client_and_cluster(): client = get_admin_client() if CLUSTER_NAME", "env_details = \"env.CATTLE_TEST_URL='\" + CATTLE_TEST_URL + \"'\\n\" env_details += \"env.ADMIN_TOKEN='\"", "pod.nodeId == node.id: target_name_list.append(pod.name) break host_ip = node.externalIpAddress curl_cmd =", "def exec_shell_command(ip, port, cmd, password): ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(ip,", "crtb, role_template_id): crtb = client.update( crtb, roleTemplateId=role_template_id, userId=user.id) return crtb", "workload.transitioning == \"error\" print(workload.transitioningMessage) assert error_message in workload.transitioningMessage def validate_workload(p_client,", "assign_members_to_project(client, user, project, role_template_id): prtb = client.create_project_role_template_binding( projectId=project.id, roleTemplateId=role_template_id, subjectKind=\"User\",", "not in exception_list: node = wait_for_node_status(client, node, \"active\") if node", "roleTemplateId=role_template_id, subjectKind=\"User\", userId=user.id) return crtb def assign_members_to_project(client, user, project, role_template_id):", "env_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"rancher_env.config\") CLUSTER_NAME_2 = \"\" def random_str():", "resource, check_function, fail_handler=None, timeout=DEFAULT_TIMEOUT): start = time.time() resource = client.reload(resource)", "cluster, role_template_id): crtb = client.create_cluster_role_template_binding( clusterId=cluster.id, roleTemplateId=role_template_id, subjectKind=\"User\", userId=user.id) return", "target_name_list[:] count = 5 * len(target_name_list) for i in range(1,", "exceptionMsg = exceptionMsg + fail_handler(resource) raise Exception(exceptionMsg) time.sleep(.5) resource =", "= kubectl_pod_exec(pod, dig_cmd) for expected_value in expected: assert expected_value in", "subprocess.CalledProcessError as e: output = e.output returncode = e.returncode print(returncode)", "!= 403: raise e else: return obj delta = time.time()", "'active' return set_pspt_for_project(p, client, pspt) def set_pspt_for_project(project, client, pspt): 
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
    client = get_client_for_token(token)
    p = create_project(client, cluster, project_name)
    c_client = get_cluster_client_for_token(cluster, token)
    ns = create_ns(c_client, cluster, p, ns_name)
    return p, ns


def create_project(client, cluster, project_name=None):
    if project_name is None:
        project_name = random_name()
    p = client.create_project(name=project_name,
                              clusterId=cluster.id)
    time.sleep(5)
    p = wait_until_available(client, p)
    assert p.state == 'active'
    return p


def create_project_with_pspt(client, cluster, pspt):
    p = client.create_project(name=random_name(),
                              clusterId=cluster.id)
    p = wait_until_available(client, p)
    assert p.state == 'active'
    return set_pspt_for_project(p, client, pspt)


def set_pspt_for_project(project, client, pspt):
    project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
    project = wait_until_available(client, project)
    assert project.state == 'active'
    return project


def create_ns(client, cluster, project, ns_name=None):
    if ns_name is None:
        ns_name = random_name()
    ns = client.create_namespace(name=ns_name,
                                 clusterId=cluster.id,
                                 projectId=project.id)
    wait_for_ns_to_become_active(client, ns)
    ns = client.reload(ns)
    assert ns.state == 'active'
    return ns


def assign_members_to_cluster(client, user, cluster, role_template_id):
    crtb = client.create_cluster_role_template_binding(
        clusterId=cluster.id,
        roleTemplateId=role_template_id,
        subjectKind="User",
        userId=user.id)
    return crtb


def assign_members_to_project(client, user, project, role_template_id):
    prtb = client.create_project_role_template_binding(
        projectId=project.id,
        roleTemplateId=role_template_id,
        subjectKind="User",
        userId=user.id)
    return prtb


def change_member_role_in_cluster(client, user, crtb, role_template_id):
    crtb = client.update(
        crtb,
        roleTemplateId=role_template_id,
        userId=user.id)
    return crtb


def change_member_role_in_project(client, user, prtb, role_template_id):
    prtb = client.update(
        prtb,
        roleTemplateId=role_template_id,
        userId=user.id)
    return prtb


def create_kubeconfig(cluster):
    generateKubeConfigOutput = cluster.generateKubeconfig()
    print(generateKubeConfigOutput.config)
    with open(kube_fname, "w") as f:
        f.write(generateKubeConfigOutput.config)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False):
    command = 'kubectl --kubeconfig {0} {1}'.format(
        kube_fname, cmd)
    if json_out:
        command += ' -o json'
    if stderr:
        # run_command_with_stderr returns (output, returncode); only the
        # raw output matters to callers such as kubectl_pod_exec.
        result, _ = run_command_with_stderr(command)
    else:
        result = run_command(command)
    if json_out:
        result = json.loads(result)
    print(result)
    return result


def run_command(command):
    return subprocess.check_output(command, shell=True, text=True)


def run_command_with_stderr(command):
    try:
        output = subprocess.check_output(command, shell=True,
                                         stderr=subprocess.PIPE)
        returncode = 0
    except subprocess.CalledProcessError as e:
        output = e.output
        returncode = e.returncode
    print(returncode)
    return (output, returncode)


def kubectl_pod_exec(pod, cmd):
    command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
    return execute_kubectl_cmd(command, json_out=False, stderr=True)


def exec_shell_command(ip, port, cmd, password):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(ip, username="root", password=password, port=port)

    stdin, stdout, stderr = ssh.exec_command(cmd)
    response = stdout.readlines()
    return response
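# Illustrative usage sketch: the kubectl wrappers above assume
# create_kubeconfig(cluster) has already written kube_fname. The namespace
# used here is hypothetical.
def _example_kubectl_usage(pod):
    # json_out=True (the default) appends '-o json' and parses the output.
    pods = execute_kubectl_cmd("get pods -n default")
    print("pods in default:", len(pods["items"]))
    # Pod exec goes through run_command_with_stderr and yields raw bytes.
    output = kubectl_pod_exec(pod, "hostname")
    return output.decode().strip()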
def validate_psp_error_worklaod(p_client, workload, error_message):
    workload = wait_for_wl_transitioning(p_client, workload)
    assert workload.state == "updating"
    assert workload.transitioning == "error"
    print(workload.transitioningMessage)
    assert error_message in workload.transitioningMessage


def validate_workload(p_client, workload, type, ns_name, pod_count=1,
                      wait_for_cron_pods=60):
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    # For cronJobs, wait out the first scheduled run before counting pods
    if type == "cronJob":
        time.sleep(wait_for_cron_pods)
    pods = wait_for_pods_in_workload(p_client, workload, pod_count)
    assert len(pods) == pod_count
    pods = p_client.list_pod(workloadId=workload.id).data
    assert len(pods) == pod_count
    for pod in pods:
        wait_for_pod_to_running(p_client, pod)
    wl_result = execute_kubectl_cmd(
        "get " + type + " " + workload.name + " -n " + ns_name)
    if type == "deployment" or type == "statefulSet":
        assert wl_result["status"]["readyReplicas"] == pod_count
    if type == "daemonSet":
        assert wl_result["status"]["currentNumberScheduled"] == pod_count
    if type == "cronJob":
        assert len(wl_result["status"]["active"]) >= pod_count
        return
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods_result = execute_kubectl_cmd(get_pods)
    assert len(pods_result["items"]) == pod_count
    for pod in pods_result["items"]:
        assert pod["status"]["phase"] == "Running"
    return pods_result["items"]


def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
                                     pod_count=1):
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    pods = wait_for_pods_in_workload(p_client, workload, pod_count)
    assert len(pods) == pod_count
    for pod in pods:
        wait_for_pod_to_running(p_client, pod)
    wl_result = execute_kubectl_cmd(
        "get " + type + " " + workload.name + " -n " + ns_name)
    assert wl_result["status"]["readyReplicas"] == pod_count
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    execute_kubectl_cmd(get_pods)
    pods_result = execute_kubectl_cmd(get_pods)
    assert len(pods_result["items"]) == pod_count
    for pod in pods_result["items"]:
        assert pod["status"]["phase"] == "Running"
        assert len(pod["status"]["containerStatuses"]) == 2
        assert "running" in pod["status"]["containerStatuses"][0]["state"]
        assert "running" in pod["status"]["containerStatuses"][1]["state"]


def validate_workload_paused(p_client, workload, expectedstatus):
    workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
    assert workloadStatus == expectedstatus


def validate_pod_images(expectedimage, workload, ns_name):
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    for pod in pods["items"]:
        assert pod["spec"]["containers"][0]["image"] == expectedimage


def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    curpodnames = []
    for pod in pods["items"]:
        curpodnames.append(pod["metadata"]["name"])
    for expectedpod in expectedpods["items"]:
        assert expectedpod["metadata"]["name"] in curpodnames


def validate_workload_image(client, workload, expectedImage, ns):
    workload = client.list_workload(uuid=workload.uuid).data[0]
    assert workload.containers[0].image == expectedImage
    validate_pod_images(expectedImage, workload, ns.name)
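# Illustrative usage sketch: a minimal deployment test assembled from the
# validators above. The scale value and names are hypothetical; the Rancher
# server is assumed reachable with ADMIN_TOKEN.
def _example_validate_deployment(cluster):
    project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
    p_client = get_project_client_for_token(project, ADMIN_TOKEN)
    con = [{"name": "test1", "image": TEST_IMAGE}]
    workload = p_client.create_workload(name=random_test_name("dep"),
                                        containers=con,
                                        namespaceId=ns.id,
                                        scale=2)
    create_kubeconfig(cluster)
    return validate_workload(p_client, workload, "deployment", ns.name,
                             pod_count=2)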
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl


def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
                              state="error"):
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.transitioning != state:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl


def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    ingresses = client.list_ingress(uuid=ingress.uuid).data
    assert len(ingresses) == 1
    wl = ingresses[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        ingresses = client.list_ingress(uuid=ingress.uuid).data
        assert len(ingresses) == 1
        wl = ingresses[0]
    return wl


def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    pods = client.list_pod(uuid=pod.uuid).data
    assert len(pods) == 1
    p = pods[0]
    while p.state != "running":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        pods = client.list_pod(uuid=pod.uuid).data
        assert len(pods) == 1
        p = pods[0]
    return p


def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    nss = client.list_namespace(uuid=ns.uuid).data
    assert len(nss) == 1
    ns = nss[0]
    while ns.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        nss = client.list_namespace(uuid=ns.uuid).data
        assert len(nss) == 1
        ns = nss[0]
    return ns


def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
                        timeout=DEFAULT_TIMEOUT):
    start = time.time()
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    for x in range(0, numofpods - 1):
        pod = pods["items"][x]
        podimage = pod["spec"]["containers"][0]["image"]
        while podimage != expectedimage:
            if time.time() - start > timeout:
                raise AssertionError(
                    "Timed out waiting for correct pod images")
            time.sleep(.5)
            pods = execute_kubectl_cmd(get_pods)
            pod = pods["items"][x]
            podimage = pod["spec"]["containers"][0]["image"]


def wait_for_pods_in_workload(p_client, workload, pod_count,
                              timeout=DEFAULT_TIMEOUT):
    start = time.time()
    pods = p_client.list_pod(workloadId=workload.id).data
    while len(pods) != pod_count:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        pods = p_client.list_pod(workloadId=workload.id).data
    return pods
def get_schedulable_nodes(cluster):
    client = get_admin_client()
    nodes = client.list_node(clusterId=cluster.id).data
    schedulable_nodes = []
    for node in nodes:
        if node.worker:
            schedulable_nodes.append(node)
    return schedulable_nodes


def get_role_nodes(cluster, role):
    etcd_nodes = []
    control_nodes = []
    worker_nodes = []
    node_list = []
    client = get_admin_client()
    nodes = client.list_node(clusterId=cluster.id).data
    for node in nodes:
        if node.etcd:
            etcd_nodes.append(node)
        if node.controlPlane:
            control_nodes.append(node)
        if node.worker:
            worker_nodes.append(node)
    if role == "etcd":
        node_list = etcd_nodes
    if role == "control":
        node_list = control_nodes
    if role == "worker":
        node_list = worker_nodes
    return node_list


def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
                                    retry_count=0):
    node_auto_deleted = False
    nodes = client.list_node(clusterId=cluster.id).data
    for node in nodes:
        if node.requestedHostname not in exception_list:
            node = wait_for_node_status(client, node, "active")
            if node is None:
                print("Need to re-evalauate new node list")
                node_auto_deleted = True
                retry_count += 1
                print("Retry Count:" + str(retry_count))
    if node_auto_deleted and retry_count < 5:
        wait_for_nodes_to_become_active(client, cluster, exception_list,
                                        retry_count)


def wait_for_node_status(client, node, state):
    uuid = node.uuid
    start = time.time()
    nodes = client.list_node(uuid=uuid).data
    node_count = len(nodes)
    # Handle the case of nodes getting auto deleted when they are part of
    # nodepools
    if node_count == 1:
        node_status = nodes[0].state
    else:
        print("Node does not exist anymore -" + uuid)
        return None
    while node_status != state:
        if time.time() - start > MACHINE_TIMEOUT:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(5)
        nodes = client.list_node(uuid=uuid).data
        node_count = len(nodes)
        if node_count == 1:
            node_status = nodes[0].state
        else:
            print("Node does not exist anymore -" + uuid)
            return None
    return node


def wait_for_node_to_be_deleted(client, node, timeout=300):
    uuid = node.uuid
    start = time.time()
    nodes = client.list_node(uuid=uuid).data
    node_count = len(nodes)
    while node_count != 0:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for node to get deleted")
        time.sleep(.5)
        nodes = client.list_node(uuid=uuid).data
        node_count = len(nodes)


def wait_for_cluster_node_count(client, cluster, expected_node_count,
                                timeout=300):
    start = time.time()
    nodes = client.list_node(clusterId=cluster.id).data
    node_count = len(nodes)
    while node_count != expected_node_count:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        nodes = client.list_node(clusterId=cluster.id).data
        node_count = len(nodes)
def validate_ingress(p_client, cluster, workloads, host, path,
                     insecure_redirect=False):
    time.sleep(10)
    curl_args = " "
    if (insecure_redirect):
        curl_args = " -L --insecure "
    if len(host) > 0:
        curl_args += " --header 'Host: " + host + "'"
    nodes = get_schedulable_nodes(cluster)
    target_name_list = get_target_names(p_client, workloads)
    for node in nodes:
        host_ip = node.externalIpAddress
        cmd = curl_args + " http://" + host_ip + path
        validate_http_response(cmd, target_name_list)


def validate_ingress_using_endpoint(p_client, ingress, workloads,
                                    timeout=300):
    target_name_list = get_target_names(p_client, workloads)
    start = time.time()
    fqdn_available = False
    url = None
    while not fqdn_available:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
        assert len(ingress_list) == 1
        ingress = ingress_list[0]
        if hasattr(ingress, 'publicEndpoints'):
            for public_endpoint in ingress.publicEndpoints:
                if public_endpoint["hostname"].startswith(ingress.name):
                    fqdn_available = True
                    url = \
                        public_endpoint["protocol"].lower() + "://" + \
                        public_endpoint["hostname"]
                    if "path" in public_endpoint.keys():
                        url += public_endpoint["path"]
    time.sleep(10)
    validate_http_response(url, target_name_list)


def get_target_names(p_client, workloads):
    pods = []
    for workload in workloads:
        pod_list = p_client.list_pod(workloadId=workload.id).data
        pods.extend(pod_list)
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod.name)
    print("target name list:" + str(target_name_list))
    return target_name_list


def get_endpoint_url_for_workload(p_client, workload, timeout=600):
    fqdn_available = False
    url = ""
    start = time.time()
    while not fqdn_available:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        workload_list = p_client.list_workload(uuid=workload.uuid).data
        assert len(workload_list) == 1
        workload = workload_list[0]
        if hasattr(workload, 'publicEndpoints'):
            assert len(workload.publicEndpoints) > 0
            url = "http://"
            url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
            url = url + str(workload.publicEndpoints[0]["port"])
            fqdn_available = True
    return url


def wait_until_lb_is_active(url, timeout=300):
    start = time.time()
    while check_for_no_access(url):
        time.sleep(.5)
        print("No access yet")
        if time.time() - start > timeout:
            raise Exception('Timed out waiting for LB to become active')
    return


def check_for_no_access(url):
    try:
        requests.get(url)
        return False
    except requests.ConnectionError:
        print("Connection Error - " + url)
        return True


def validate_http_response(cmd, target_name_list, client_pod=None):
    target_hit_list = target_name_list[:]
    count = 5 * len(target_name_list)
    for i in range(1, count):
        if len(target_hit_list) == 0:
            break
        if client_pod is None:
            curl_cmd = "curl " + cmd
            result = run_command(curl_cmd)
        else:
            wget_cmd = "wget -qO- " + cmd
            result = kubectl_pod_exec(client_pod, wget_cmd)
            result = result.decode()
        result = result.rstrip()
        print("cmd: \t" + cmd)
        print("result: \t" + result)
        assert result in target_name_list
        if result in target_hit_list:
            target_hit_list.remove(result)
    print("After removing all, the rest is: ", target_hit_list)
    assert len(target_hit_list) == 0
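# Design note as a sketch: validate_http_response treats the endpoint as a
# round-robin balancer. It issues up to 5 * len(targets) requests and
# requires every backend pod name (served from /name.html) to appear at
# least once. The objects below are hypothetical.
def _example_round_robin_check(p_client, workload, cluster):
    target_names = get_target_names(p_client, [workload])
    node_ip = get_schedulable_nodes(cluster)[0].externalIpAddress
    port = workload.publicEndpoints[0]["port"]
    validate_http_response(
        " http://" + node_ip + ":" + str(port) + "/name.html", target_names)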
def validate_hostPort(p_client, workload, source_port, cluster):
    pods = p_client.list_pod(workloadId=workload.id).data
    nodes = get_schedulable_nodes(cluster)
    for node in nodes:
        target_name_list = []
        for pod in pods:
            print(pod.nodeId + " check " + node.id)
            # a hostPort pod only answers on the node it is scheduled to
            if pod.nodeId == node.id:
                target_name_list.append(pod.name)
                break
        host_ip = node.externalIpAddress
        curl_cmd = " http://" + host_ip + ":" + \
                   str(source_port) + "/name.html"
        validate_http_response(curl_cmd, target_name_list)


def validate_lb(p_client, workload):
    url = get_endpoint_url_for_workload(p_client, workload)
    target_name_list = get_target_names(p_client, [workload])
    wait_until_lb_is_active(url)
    validate_http_response(url + "/name.html", target_name_list)


def validate_nodePort(p_client, workload, cluster):
    source_port = workload.publicEndpoints[0]["port"]
    nodes = get_schedulable_nodes(cluster)
    pods = p_client.list_pod(workloadId=workload.id).data
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod.name)
    print("target name list:" + str(target_name_list))
    for node in nodes:
        host_ip = node.externalIpAddress
        curl_cmd = " http://" + host_ip + ":" + \
                   str(source_port) + "/name.html"
        validate_http_response(curl_cmd, target_name_list)


def validate_clusterIp(p_client, workload, cluster_ip, test_pods):
    pods = p_client.list_pod(workloadId=workload.id).data
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod["name"])
    curl_cmd = "http://" + cluster_ip + "/name.html"
    for pod in test_pods:
        validate_http_response(curl_cmd, target_name_list, pod)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
                                         workload2, allow_connectivity=True):
    wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
    wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
    for pod in wl1_pods:
        for o_pod in wl2_pods:
            check_connectivity_between_pods(pod, o_pod, allow_connectivity)


def check_connectivity_between_workload_pods(p_client, workload):
    pods = p_client.list_pod(workloadId=workload.id).data
    for pod in pods:
        for o_pod in pods:
            check_connectivity_between_pods(pod, o_pod)


def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
    pod_ip = pod2.status.podIp
    cmd = "ping -c 1 -W 1 " + pod_ip
    response = kubectl_pod_exec(pod1, cmd)
    print("Actual ping Response from " + pod1.name + ":" + str(response))
    if allow_connectivity:
        assert pod_ip in str(response) and \
            " 0% packet loss" in str(response)
    else:
        assert pod_ip in str(response) and \
            " 100% packet loss" in str(response)


def validate_dns_record(pod, record, expected):
    # requires pod with `dig` available - TEST_IMAGE
    host = '{0}.{1}.svc.cluster.local'.format(
        record["name"], record["namespaceId"])
    validate_dns_entry(pod, host, expected)


def validate_dns_entry(pod, host, expected):
    # requires pod with `dig` available - TEST_IMAGE
    cmd = 'ping -c 1 -W 1 {0}'.format(host)
    ping_output = kubectl_pod_exec(pod, cmd)

    ping_validation_pass = False
    for expected_value in expected:
        if expected_value in str(ping_output):
            ping_validation_pass = True
            break

    assert ping_validation_pass is True
    assert " 0% packet loss" in str(ping_output)

    dig_cmd = 'dig {0} +short'.format(host)
    dig_output = kubectl_pod_exec(pod, dig_cmd)

    for expected_value in expected:
        assert expected_value in str(dig_output)
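# Illustrative usage sketch: pairing the connectivity helpers with a network
# policy expectation. Both workloads are assumed to run TEST_IMAGE, which
# ships ping and dig; allow_connectivity=False asserts 100% packet loss.
def _example_network_policy_check(p_client, wl_same_ns, wl_other_ns):
    check_connectivity_between_workload_pods(p_client, wl_same_ns)
    check_connectivity_between_workloads(p_client, wl_same_ns,
                                         p_client, wl_other_ns,
                                         allow_connectivity=False)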
def validate_cluster(client, cluster, intermediate_state="provisioning",
                     check_intermediate_state=True, skipIngresscheck=True,
                     nodes_not_in_active_state=[], k8s_version=""):
    cluster = validate_cluster_state(
        client, cluster,
        check_intermediate_state=check_intermediate_state,
        intermediate_state=intermediate_state,
        nodes_not_in_active_state=nodes_not_in_active_state)
    # Create a daemonSet workload and have an Ingress with Workload
    # rule pointing to this daemonSet
    create_kubeconfig(cluster)
    if k8s_version != "":
        check_cluster_version(cluster, k8s_version)
    if hasattr(cluster, 'rancherKubernetesEngineConfig'):
        check_cluster_state(len(get_role_nodes(cluster, "etcd")))
    project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
    p_client = get_project_client_for_token(project, ADMIN_TOKEN)
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    name = random_test_name("default")
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns.id,
                                        daemonSetConfig={})
    validate_workload(p_client, workload, "daemonSet", ns.name,
                      len(get_schedulable_nodes(cluster)))
    if not skipIngresscheck:
        host = "test" + str(random_int(10000, 99999)) + ".com"
        path = "/name.html"
        rule = {"host": host,
                "paths": [{"workloadIds": [workload.id],
                           "targetPort": "80"}]}
        ingress = p_client.create_ingress(name=name,
                                          namespaceId=ns.id,
                                          rules=[rule])
        wait_for_ingress_to_active(p_client, ingress)
        validate_ingress(p_client, cluster, [workload], host, path)
    return cluster


def check_cluster_version(cluster, version):
    cluster_k8s_version = \
        cluster.appliedSpec["rancherKubernetesEngineConfig"][
            "kubernetesVersion"]
    assert cluster_k8s_version == version, \
        "cluster_k8s_version: " + cluster_k8s_version + \
        " Expected: " + version
    expected_k8s_version = version[:version.find("-")]
    k8s_version = execute_kubectl_cmd("version")
    kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
    assert kubectl_k8s_version == expected_k8s_version, \
        "kubectl version: " + kubectl_k8s_version + \
        " Expected: " + expected_k8s_version


def check_cluster_state(etcd_count):
    css_resp = execute_kubectl_cmd("get cs")
    css = css_resp["items"]
    components = ["scheduler", "controller-manager"]
    for i in range(0, etcd_count):
        components.append("etcd-" + str(i))
    print("components to check - " + str(components))
    for cs in css:
        component_name = cs["metadata"]["name"]
        assert component_name in components
        components.remove(component_name)
        assert cs["conditions"][0]["status"] == "True"
        assert cs["conditions"][0]["type"] == "Healthy"
    assert len(components) == 0


def validate_cluster_state(client, cluster,
                           check_intermediate_state=True,
                           intermediate_state="provisioning",
                           nodes_not_in_active_state=[]):
    if check_intermediate_state:
        cluster = wait_for_condition(
            client, cluster,
            lambda x: x.state == intermediate_state,
            lambda x: 'State is: ' + x.state,
            timeout=MACHINE_TIMEOUT)
        assert cluster.state == intermediate_state
    cluster = wait_for_condition(
        client, cluster,
        lambda x: x.state == "active",
        lambda x: 'State is: ' + x.state,
        timeout=MACHINE_TIMEOUT)
    assert cluster.state == "active"
    wait_for_nodes_to_become_active(client, cluster,
                                    exception_list=nodes_not_in_active_state)
    return cluster


def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    sleep = 0.01
    while True:
        time.sleep(sleep)
        sleep *= 2
        if sleep > 2:
            sleep = 2
        try:
            obj = client.reload(obj)
        except ApiError as e:
            # 403 means the object is not visible yet; keep polling
            if e.error.status != 403:
                raise e
        else:
            return obj
        delta = time.time() - start
        if delta > timeout:
            msg = 'Timeout waiting for [{}:{}] for condition after {}' \
                ' seconds'.format(obj.type, obj.id, delta)
            raise Exception(msg)


def get_admin_client_and_cluster():
    client = get_admin_client()
    if CLUSTER_NAME == "":
        clusters = client.list_cluster().data
    else:
        clusters = client.list_cluster(name=CLUSTER_NAME).data
    assert len(clusters) > 0
    cluster = clusters[0]
    return client, cluster
def get_custom_host_registration_cmd(client, cluster, roles, node):
    allowed_roles = ["etcd", "worker", "controlplane"]
    cluster_tokens = client.list_cluster_registration_token(
        clusterId=cluster.id).data
    if len(cluster_tokens) > 0:
        cluster_token = cluster_tokens[0]
    else:
        cluster_token = create_custom_host_registration_token(client, cluster)
    cmd = cluster_token.nodeCommand
    for role in roles:
        assert role in allowed_roles
        cmd += " --" + role
    additional_options = " --address " + node.public_ip_address + \
                         " --internal-address " + node.private_ip_address
    cmd += additional_options
    return cmd


def create_custom_host_registration_token(client, cluster):
    cluster_token = client.create_cluster_registration_token(
        clusterId=cluster.id)
    cluster_token = client.wait_success(cluster_token)
    assert cluster_token.state == 'active'
    return cluster_token


def get_cluster_type(client, cluster):
    cluster_configs = [
        "googleKubernetesEngineConfig",
        "azureKubernetesServiceConfig",
        "amazonElasticContainerServiceConfig",
        "rancherKubernetesEngineConfig"
    ]
    if "rancherKubernetesEngineConfig" in cluster:
        nodes = client.list_node(clusterId=cluster.id).data
        if len(nodes) > 0:
            if nodes[0].nodeTemplateId is None:
                return "Custom"
    for cluster_config in cluster_configs:
        if cluster_config in cluster:
            return cluster_config
    return "Imported"


def delete_cluster(client, cluster):
    nodes = client.list_node(clusterId=cluster.id).data
    # Delete Cluster
    client.delete(cluster)
    # Delete nodes(in cluster) from AWS for Imported and Custom Cluster
    if (len(nodes) > 0):
        cluster_type = get_cluster_type(client, cluster)
        print(cluster_type)
        if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
            nodes = client.list_node(clusterId=cluster.id).data
            filters = []
            ip_filter = {}
            ip_list = []
            ip_filter['Name'] = \
                'network-interface.addresses.association.public-ip'
            ip_filter['Values'] = ip_list
            filters.append(ip_filter)
            for node in nodes:
                ip_list.append(node.externalIpAddress)
            assert len(ip_filter) > 0
            print(ip_filter)
            aws_nodes = AmazonWebServices().get_nodes(filters)
            for node in aws_nodes:
                print(node.public_ip_address)
            # delete once, after logging every matching node
            AmazonWebServices().delete_nodes(aws_nodes)


def delete_node(aws_nodes):
    for node in aws_nodes:
        AmazonWebServices().delete_node(node)


def cluster_cleanup(client, cluster, aws_nodes=None):
    if RANCHER_CLEANUP_CLUSTER:
        client.delete(cluster)
        if aws_nodes is not None:
            delete_node(aws_nodes)
    else:
        env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
        env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
        env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
        create_config_file(env_details)


def create_config_file(env_details):
    with open(env_file, "w") as f:
        f.write(env_details)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    pv_list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
    assert len(pv_list) == 1
    pv = pv_list[0]
    while pv.state != "available":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to available")
        time.sleep(.5)
        pv_list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
        assert len(pv_list) == 1
        pv = pv_list[0]
    return pv


def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    pvc_list = p_client.list_persistent_volume_claim(
        uuid=pvc_object.uuid).data
    assert len(pvc_list) == 1
    pvc = pvc_list[0]
    while pvc.state != "bound":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to bound")
        time.sleep(.5)
        pvc_list = p_client.list_persistent_volume_claim(
            uuid=pvc_object.uuid).data
        assert len(pvc_list) == 1
        pvc = pvc_list[0]
    return pvc


def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
                       mount_path, sub_path, is_daemonSet=False):
    volumes = [{"type": "volume",
                "name": "vol1",
                "persistentVolumeClaim": {
                    "readOnly": "false",
                    "type": "persistentVolumeClaimVolumeSource",
                    "persistentVolumeClaimId": pvc_name
                }}]
    volumeMounts = [{"readOnly": "False",
                     "type": "volumeMount",
                     "mountPath": mount_path,
                     "subPath": sub_path,
                     "name": "vol1"}]
    con = [{"name": "test1",
            "image": TEST_IMAGE,
            "volumeMounts": volumeMounts}]
    if is_daemonSet:
        workload = p_client.create_workload(name=wl_name,
                                            containers=con,
                                            namespaceId=ns_id,
                                            volumes=volumes,
                                            daemonSetConfig={})
    else:
        workload = p_client.create_workload(name=wl_name,
                                            containers=con,
                                            namespaceId=ns_id,
                                            volumes=volumes)
    return workload


def write_content_to_file(pod, content, filename):
    cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
    output = kubectl_pod_exec(pod, cmd_write)
    assert output.strip().decode('utf-8') == ""


def validate_file_content(pod, content, filename):
    cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
    output = kubectl_pod_exec(pod, cmd_get_content)
    assert output.strip().decode('utf-8') == content
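# Illustrative usage sketch: an end-to-end pass over the NFS-backed volume
# helpers. The claim name and paths are hypothetical, and
# create_kubeconfig(cluster) must have run so kubectl exec works.
def _example_nfs_volume_roundtrip(p_client, ns, pvc_name):
    wl = create_wl_with_nfs(p_client, ns.id, pvc_name,
                            random_test_name("nfs"),
                            mount_path="/var/nfs", sub_path="test")
    wl = wait_for_wl_to_active(p_client, wl)
    pod = p_client.list_pod(workloadId=wl.id).data[0]
    write_content_to_file(pod, "hello", "/var/nfs/sample.txt")
    validate_file_content(pod, "hello", "/var/nfs/sample.txt")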
def wait_for_mcapp_to_active(client, multiClusterApp,
                             timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    print("\nuuid:")
    print(multiClusterApp.uuid)
    time.sleep(5)
    mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid,
                                         name=multiClusterApp.name).data
    start = time.time()
    assert len(mcapps) == 1
    mapp = mcapps[0]
    print(mapp.state)
    while mapp.state != "active":
        print(mapp.uuid)
        print(mapp.state)
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        multiclusterapps = client.list_multiClusterApp(
            uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
        assert len(multiclusterapps) == 1
        mapp = multiclusterapps[0]
    return mapp


def validate_mcapp_cluster(app_id, p_client):
    mcapp = p_client.list_app(name=app_id).data
    assert len(mcapp) == 1
    app = mcapp[0]
    return app


def wait_for_mcapp_cluster_level_to_active(
        client, app_id, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    mcapps = client.list_app(name=app_id).data
    start = time.time()
    assert len(mcapps) == 1
    mapp = mcapps[0]
    print(mapp.state)
    while mapp.state != "active":
        print(mapp.state)
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        apps = client.list_app(name=app_id).data
        assert len(apps) == 1
        mapp = apps[0]
    return mapp
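# Illustrative usage sketch: driving the multi-cluster app helpers end to
# end. The mcapp object is assumed to come from the admin client's
# multiClusterApp create call; app ids and per-cluster project clients are
# hypothetical.
def _example_multi_cluster_app_check(client, mcapp, app_id1, app_id2,
                                     p_client1, p_client2):
    wait_for_mcapp_to_active(client, mcapp)
    validate_multi_cluster_app_cluster(app_id1, app_id2,
                                       p_client1, p_client2)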

def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
    client = get_client_for_token(token)
    p = create_project(client, cluster, project_name)
    c_client = get_cluster_client_for_token(cluster, token)
    ns = create_ns(c_client, cluster, p, ns_name)
    return p, ns


def create_project(client, cluster, project_name=None):
    if project_name is None:
        project_name = random_name()
    p = client.create_project(name=project_name,
                              clusterId=cluster.id)
    time.sleep(5)
    p = wait_until_available(client, p)
    assert p.state == 'active'
    return p


def create_project_with_pspt(client, cluster, pspt):
    p = client.create_project(name=random_name(),
                              clusterId=cluster.id)
    p = wait_until_available(client, p)
    assert p.state == 'active'
    return set_pspt_for_project(p, client, pspt)


def set_pspt_for_project(project, client, pspt):
    project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
    project = wait_until_available(client, project)
    assert project.state == 'active'
    return project


def create_ns(client, cluster, project, ns_name=None):
    if ns_name is None:
        ns_name = random_name()
    ns = client.create_namespace(name=ns_name,
                                 clusterId=cluster.id,
                                 projectId=project.id)
    wait_for_ns_to_become_active(client, ns)
    ns = client.reload(ns)
    assert ns.state == 'active'
    return ns


def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    nss = client.list_namespace(uuid=ns.uuid).data
    assert len(nss) == 1
    ns = nss[0]
    while ns.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        nss = client.list_namespace(uuid=ns.uuid).data
        assert len(nss) == 1
        ns = nss[0]
    return ns


def assign_members_to_cluster(client, user, cluster, role_template_id):
    crtb = client.create_cluster_role_template_binding(
        clusterId=cluster.id,
        roleTemplateId=role_template_id,
        subjectKind="User",
        userId=user.id)
    return crtb


def assign_members_to_project(client, user, project, role_template_id):
    prtb = client.create_project_role_template_binding(
        projectId=project.id,
        roleTemplateId=role_template_id,
        subjectKind="User",
        userId=user.id)
    return prtb


def change_member_role_in_cluster(client, user, crtb, role_template_id):
    crtb = client.update(
        crtb,
        roleTemplateId=role_template_id,
        userId=user.id)
    return crtb


def change_member_role_in_project(client, user, prtb, role_template_id):
    prtb = client.update(
        prtb,
        roleTemplateId=role_template_id,
        userId=user.id)
    return prtb


def create_kubeconfig(cluster):
    generateKubeConfigOutput = cluster.generateKubeconfig()
    print(generateKubeConfigOutput.config)
    file = open(kube_fname, "w")
    file.write(generateKubeConfigOutput.config)
    file.close()
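
# Illustrative sketch, an assumption rather than part of the original
# module: the per-test setup most callers build from the helpers above --
# a fresh project plus namespace, and a project-scoped client for
# workload-level API calls.
def _example_project_setup(cluster):
    project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
    p_client = get_project_client_for_token(project, ADMIN_TOKEN)
    return project, ns, p_client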
pod[\"spec\"][\"containers\"][0][\"image\"] == expectedimage def validate_pods_are_running_by_id(expectedpods, workload, ns_name): for key,", "= cs[\"metadata\"][\"name\"] assert component_name in components components.remove(component_name) assert cs[\"conditions\"][0][\"status\"] ==", "= get_target_names(p_client, workloads) for node in nodes: host_ip = node.externalIpAddress", "validate_dns_entry(pod, host, expected) def validate_dns_entry(pod, host, expected): # requires pod", "\" -- \" + cmd return execute_kubectl_cmd(command, json_out=False, stderr=True) def", "in expected: if expected_value in str(ping_output): ping_validation_pass = True break", "> 0: cluster_token = cluster_tokens[0] else: cluster_token = create_custom_host_registration_token(client, cluster)", "== \"deployment\" or type == \"statefulSet\": assert wl_result[\"status\"][\"readyReplicas\"] == pod_count", "len(ingresses) == 1 wl = ingresses[0] return wl def wait_for_wl_transitioning(client,", "state to get to active\") time.sleep(.5) nodes = client.list_node(clusterId=cluster.id).data node_count", "= wait_for_condition( client, cluster, lambda x: x.state == \"active\", lambda", "exception_list, retry_count) def wait_for_node_status(client, node, state): uuid = node.uuid start", "project.links['self'] + '/schemas' p_client = rancher.Client(url=p_url, token=token, verify=False) return p_client", "public_endpoint[\"hostname\"] if \"path\" in public_endpoint.keys(): url += public_endpoint[\"path\"] time.sleep(10) validate_http_response(url,", "node.public_ip_address + \\ \" --internal-address \" + node.private_ip_address cmd +=", "print(mapp.state) if time.time() - start > timeout: raise AssertionError( \"Timed", "import subprocess import time import requests import ast import paramiko", "= [] client = get_admin_client() nodes = client.list_node(clusterId=cluster.id).data for node", "raise Exception(timeout_message) else: raise Exception('Timeout waiting for condition') ret =", "rancher.Client(url=CATTLE_API_URL, token=token, verify=False) def get_project_client_for_token(project, token): p_url = project.links['self'] +", "target_name_list) def get_target_names(p_client, workloads): pods = [] for workload in", "'tag:Name', 'Values': ['testcustom*', 'teststess*']}] ip_filter = {} ip_list = []", "1 mapp = mcapps[0] while mapp.state != \"active\": if time.time()", "1 wl = workloads[0] while wl.transitioning != state: if time.time()", "= workloads[0] return wl def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT): start =", "= time.time() pods = client.list_pod(uuid=pod.uuid).data assert len(pods) == 1 p", "\\t\" + cmd) print(\"result: \\t\" + result) assert result in", "pvc_name }}] volumeMounts = [{\"readOnly\": \"False\", \"type\": \"volumeMount\", \"mountPath\": mount_path,", "validate_http_response(url + \"/name.html\", target_name_list) def validate_nodePort(p_client, workload, cluster): source_port =", "exception_list=nodes_not_in_active_state) return cluster def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT): start = time.time()", "== 1 pvc = list[0] return pvc def create_wl_with_nfs(p_client, ns_id,", "= time.time() fqdn_available = False url = None while not", "nodes = client.list_node(clusterId=cluster.id).data filters = [ {'Name': 'tag:Name', 'Values': ['testcustom*',", "\"controller-manager\"] for i in range(0, etcd_count): components.append(\"etcd-\" + str(i)) print(\"components", "' + \\ inspect.getsource(check_function) if fail_handler: exceptionMsg = exceptionMsg +", 
"get_endpoint_url_for_workload(p_client, workload) target_name_list = get_target_names(p_client, [workload]) wait_until_lb_is_active(url) validate_http_response(url + \"/name.html\",", "'active' return cluster_token def get_cluster_type(client, cluster): cluster_configs = [ \"amazonElasticContainerServiceConfig\",", "+ path validate_http_response(cmd, target_name_list) def validate_ingress_using_endpoint(p_client, ingress, workloads, timeout=300): target_name_list", "def random_test_name(name=\"test\"): return name + \"-\" + str(random_int(10000, 99999)) def", "error_message in workload.transitioningMessage def validate_workload(p_client, workload, type, ns_name, pod_count=1, wait_for_cron_pods=60):", "cluster, lambda x: x.state == intermediate_state, lambda x: 'State is:", "role == \"control\": node_list = control_nodes if role == \"worker\":", "def random_name(): return \"test\" + \"-\" + str(random_int(10000, 99999)) def", "\"test\" + str(random_int(10000, 99999)) + \".com\" path = \"/name.html\" rule", "pods = execute_kubectl_cmd(get_pods) curpodnames = [] for pod in pods[\"items\"]:", "pod_count for key, value in workload.workloadLabels.items(): label = key +", "token=token, verify=False) return p_client def get_cluster_client_for_token(cluster, token): c_url = cluster.links['self']", "+ \"-\" + str(random_int(10000, 99999)) def get_admin_client(): return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN,", "to active\") time.sleep(.5) nss = client.list_namespace(uuid=ns.uuid).data assert len(nss) == 1", "= client.reload(resource) return resource def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None): start =", "def create_config_file(env_details): file = open(env_file, \"w\") file.write(env_details) file.close() def validate_hostPort(p_client,", "+ uuid) return None return node def wait_for_node_to_be_deleted(client, node, timeout=300):", "timeout=DEFAULT_TIMEOUT, state=\"error\"): start = time.time() workloads = client.list_workload(uuid=workload.uuid).data assert len(workloads)", "p_client1) if app_id2 != \"\": validate_mcapp_cluster(app_id2, p_client2) # verify app", "node.controlPlane: control_nodes.append(node) if node.worker: worker_nodes.append(node) if role == \"etcd\": node_list", "if app_id2 != \"\": validate_mcapp_cluster(app_id2, p_client2) # verify app in", "intermediate_state=intermediate_state, nodes_not_in_active_state=nodes_not_in_active_state) # Create Daemon set workload and have an", "ns = client.reload(ns) assert ns.state == 'active' return ns def", "images\") time.sleep(.5) pods = execute_kubectl_cmd(get_pods) pod = pods[\"items\"][x] podimage =", "k8s_version = execute_kubectl_cmd(\"version\") kubectl_k8s_version = k8s_version[\"serverVersion\"][\"gitVersion\"] assert kubectl_k8s_version == expected_k8s_version,", "0): cluster_type = get_cluster_type(client, cluster) print(cluster_type) if get_cluster_type(client, cluster) in", "Exception(timeout_message) else: raise Exception('Timeout waiting for condition') ret = callback()", "in curpodnames def validate_workload_image(client, workload, expectedImage, ns): workload = client.list_workload(uuid=workload.uuid).data[0]", "time.time() sleep = 0.01 while True: time.sleep(sleep) sleep *= 2", "\"'\\n\" env_details += \"env.ADMIN_TOKEN='\" + ADMIN_TOKEN + \"'\\n\" env_details +=", "app = mcapp[0] return app def wait_for_mcapp_cluster_level_to_active(client, app_id, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT): mcapps", "timeout=600): fqdn_available = False url = \"\" start = time.time()", "from lib.aws 

def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl


def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
                              state="error"):
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.transitioning != state:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl


def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    pods = client.list_pod(uuid=pod.uuid).data
    assert len(pods) == 1
    p = pods[0]
    while p.state != "running":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        pods = client.list_pod(uuid=pod.uuid).data
        assert len(pods) == 1
        p = pods[0]
    return p


def wait_for_pods_in_workload(p_client, workload, pod_count,
                              timeout=DEFAULT_TIMEOUT):
    start = time.time()
    pods = p_client.list_pod(workloadId=workload.id).data
    while len(pods) != pod_count:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        pods = p_client.list_pod(workloadId=workload.id).data
    return pods


def wait_for_pod_images(p_client, workload, ns_name, expectedimage,
                        numofpods, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    for x in range(0, numofpods - 1):
        pod = pods["items"][x]
        podimage = pod["spec"]["containers"][0]["image"]
        while podimage != expectedimage:
            if time.time() - start > timeout:
                raise AssertionError(
                    "Timed out waiting for correct pod images")
            time.sleep(.5)
            pods = execute_kubectl_cmd(get_pods)
            pod = pods["items"][x]
            podimage = pod["spec"]["containers"][0]["image"]


def validate_workload(p_client, workload, type, ns_name, pod_count=1,
                      wait_for_cron_pods=60):
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    # For cronJobs, wait for the first pod to get created after the
    # scheduled wait time
    if type == "cronJob":
        time.sleep(wait_for_cron_pods)
    pods = wait_for_pods_in_workload(p_client, workload, pod_count)
    assert len(pods) == pod_count
    for pod in pods:
        wait_for_pod_to_running(p_client, pod)
    wl_result = execute_kubectl_cmd(
        "get " + type + " " + workload.name + " -n " + ns_name)
    if type == "deployment" or type == "statefulSet":
        assert wl_result["status"]["readyReplicas"] == pod_count
    if type == "daemonSet":
        assert wl_result["status"]["currentNumberScheduled"] == pod_count
    if type == "cronJob":
        assert len(wl_result["status"]["active"]) >= pod_count
        return
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods_result = execute_kubectl_cmd(get_pods)
    assert len(pods_result["items"]) == pod_count
    for pod in pods_result["items"]:
        assert pod["status"]["phase"] == "Running"
    return


def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
                                     pod_count=1):
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    pods = wait_for_pods_in_workload(p_client, workload, pod_count)
    assert len(pods) == pod_count
    for pod in pods:
        wait_for_pod_to_running(p_client, pod)
    wl_result = execute_kubectl_cmd(
        "get " + type + " " + workload.name + " -n " + ns_name)
    assert wl_result["status"]["readyReplicas"] == pod_count
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods_result = execute_kubectl_cmd(get_pods)
    assert len(pods_result["items"]) == pod_count
    for pod in pods_result["items"]:
        assert pod["status"]["phase"] == "Running"
        assert len(pod["status"]["containerStatuses"]) == 2
        assert "running" in pod["status"]["containerStatuses"][0]["state"]
        assert "running" in pod["status"]["containerStatuses"][1]["state"]


def validate_workload_paused(p_client, workload, expectedstatus):
    workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
    assert workloadStatus == expectedstatus


def validate_pod_images(expectedimage, workload, ns_name):
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    for pod in pods["items"]:
        assert pod["spec"]["containers"][0]["image"] == expectedimage


def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    curpodnames = []
    for pod in pods["items"]:
        curpodnames.append(pod["metadata"]["name"])
    for expectedpod in expectedpods["items"]:
        assert expectedpod["metadata"]["name"] in curpodnames


def validate_workload_image(client, workload, expectedImage, ns):
    workload = client.list_workload(uuid=workload.uuid).data[0]
    assert workload.containers[0].image == expectedImage
    validate_pod_images(expectedImage, workload, ns.name)


def validate_psp_error_worklaod(p_client, workload, error_message):
    workload = wait_for_wl_transitioning(p_client, workload)
    assert workload.state == "updating"
    assert workload.transitioning == "error"
    print(workload.transitioningMessage)
    assert error_message in workload.transitioningMessage
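
# Illustrative sketch (assumed flow): the deploy-and-check pattern the
# validators above support -- create a single-container deployment running
# TEST_IMAGE, then let validate_workload confirm it through both the
# Rancher API and kubectl.
def _example_deploy_and_validate(p_client, ns):
    con = [{"name": "test1", "image": TEST_IMAGE}]
    workload = p_client.create_workload(name=random_test_name("default"),
                                        containers=con,
                                        namespaceId=ns.id)
    validate_workload(p_client, workload, "deployment", ns.name)
    return workload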

def get_schedulable_nodes(cluster):
    client = get_admin_client()
    nodes = client.list_node(clusterId=cluster.id).data
    schedulable_nodes = []
    for node in nodes:
        if node.worker:
            schedulable_nodes.append(node)
    return schedulable_nodes


def get_role_nodes(cluster, role):
    etcd_nodes = []
    control_nodes = []
    worker_nodes = []
    node_list = []
    client = get_admin_client()
    nodes = client.list_node(clusterId=cluster.id).data
    for node in nodes:
        if node.etcd:
            etcd_nodes.append(node)
        if node.controlPlane:
            control_nodes.append(node)
        if node.worker:
            worker_nodes.append(node)
    if role == "etcd":
        node_list = etcd_nodes
    if role == "control":
        node_list = control_nodes
    if role == "worker":
        node_list = worker_nodes
    return node_list


def get_target_names(p_client, workloads):
    pods = []
    for workload in workloads:
        pod_list = p_client.list_pod(workloadId=workload.id).data
        pods.extend(pod_list)
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod.name)
    print("target name list:" + str(target_name_list))
    return target_name_list


def get_endpoint_url_for_workload(p_client, workload, timeout=600):
    fqdn_available = False
    url = ""
    start = time.time()
    while not fqdn_available:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        workload_list = p_client.list_workload(uuid=workload.uuid).data
        assert len(workload_list) == 1
        workload = workload_list[0]
        if hasattr(workload, 'publicEndpoints'):
            assert len(workload.publicEndpoints) > 0
            url = "http://"
            url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
            url = url + str(workload.publicEndpoints[0]["port"])
            fqdn_available = True
    return url


def check_for_no_access(url):
    try:
        requests.get(url)
        return False
    except requests.ConnectionError:
        print("Connection Error - " + url)
        return True


def wait_until_lb_is_active(url, timeout=300):
    start = time.time()
    while check_for_no_access(url):
        time.sleep(.5)
        print("No access yet")
        if time.time() - start > timeout:
            raise Exception('Timed out waiting for LB to become active')
    return


def validate_http_response(cmd, target_name_list, client_pod=None):
    target_hit_list = target_name_list[:]
    count = 5 * len(target_name_list)
    for i in range(1, count):
        if len(target_hit_list) == 0:
            break
        if client_pod is None:
            curl_cmd = "curl " + cmd
            result = run_command(curl_cmd)
        else:
            wget_cmd = "wget -qO- " + cmd
            result = kubectl_pod_exec(client_pod, wget_cmd)
            result = result.decode()
        result = result.rstrip()
        print("cmd: \t" + cmd)
        print("result: \t" + result)
        assert result in target_name_list
        if result in target_hit_list:
            target_hit_list.remove(result)
    print("After removing all, target_hit_list: " + str(target_hit_list))
    assert len(target_hit_list) == 0


def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    ingresses = client.list_ingress(uuid=ingress.uuid).data
    assert len(ingresses) == 1
    wl = ingresses[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        ingresses = client.list_ingress(uuid=ingress.uuid).data
        assert len(ingresses) == 1
        wl = ingresses[0]
    return wl


def validate_ingress(p_client, cluster, workloads, host, path,
                     insecure_redirect=False):
    time.sleep(10)
    curl_args = " "
    if (insecure_redirect):
        curl_args = " -L --insecure "
    if len(host) > 0:
        curl_args += " --header 'Host: " + host + "'"
    nodes = get_schedulable_nodes(cluster)
    target_name_list = get_target_names(p_client, workloads)
    for node in nodes:
        host_ip = node.externalIpAddress
        cmd = curl_args + " http://" + host_ip + path
        validate_http_response(cmd, target_name_list)


def validate_ingress_using_endpoint(p_client, ingress, workloads,
                                    timeout=300):
    target_name_list = get_target_names(p_client, workloads)
    start = time.time()
    fqdn_available = False
    url = None
    while not fqdn_available:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
        assert len(ingress_list) == 1
        ingress = ingress_list[0]
        if hasattr(ingress, 'publicEndpoints'):
            for public_endpoint in ingress.publicEndpoints:
                if public_endpoint["hostname"].startswith(ingress.name):
                    fqdn_available = True
                    url = \
                        public_endpoint["protocol"].lower() + "://" + \
                        public_endpoint["hostname"]
                    if "path" in public_endpoint.keys():
                        url += public_endpoint["path"]
    time.sleep(10)
    validate_http_response(url, target_name_list)


def validate_hostPort(p_client, workload, source_port, cluster):
    pods = p_client.list_pod(workloadId=workload.id).data
    nodes = get_schedulable_nodes(cluster)
    for node in nodes:
        target_name_list = []
        for pod in pods:
            print(pod.nodeId + " check " + node.id)
            # Only a pod scheduled on this node can answer on its hostPort
            if pod.nodeId == node.id:
                target_name_list.append(pod.name)
                break
        host_ip = node.externalIpAddress
        curl_cmd = " http://" + host_ip + ":" + \
                   str(source_port) + "/name.html"
        validate_http_response(curl_cmd, target_name_list)


def validate_lb(p_client, workload):
    url = get_endpoint_url_for_workload(p_client, workload)
    target_name_list = get_target_names(p_client, [workload])
    wait_until_lb_is_active(url)
    validate_http_response(url + "/name.html", target_name_list)


def validate_nodePort(p_client, workload, cluster):
    source_port = workload.publicEndpoints[0]["port"]
    nodes = get_schedulable_nodes(cluster)
    pods = p_client.list_pod(workloadId=workload.id).data
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod.name)
    print("target name list:" + str(target_name_list))
    for node in nodes:
        host_ip = node.externalIpAddress
        curl_cmd = " http://" + host_ip + ":" + \
                   str(source_port) + "/name.html"
        validate_http_response(curl_cmd, target_name_list)


def validate_clusterIp(p_client, workload, cluster_ip, test_pods):
    pods = p_client.list_pod(workloadId=workload.id).data
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod["name"])
    curl_cmd = "http://" + cluster_ip + "/name.html"
    for pod in test_pods:
        validate_http_response(curl_cmd, target_name_list, pod)


def validate_dns_record(pod, record, expected):
    # requires a pod with `dig` available - TEST_IMAGE
    host = '{0}.{1}.svc.cluster.local'.format(
        record["name"], record["namespaceId"])
    validate_dns_entry(pod, host, expected)


def validate_dns_entry(pod, host, expected):
    # requires a pod with `dig` available - TEST_IMAGE
    cmd = 'ping -c 1 -W 1 {0}'.format(host)
    ping_output = kubectl_pod_exec(pod, cmd)
    ping_validation_pass = False
    for expected_value in expected:
        if expected_value in str(ping_output):
            ping_validation_pass = True
            break
    assert ping_validation_pass is True
    assert " 0% packet loss" in str(ping_output)
    dig_cmd = 'dig {0} +short'.format(host)
    dig_output = kubectl_pod_exec(pod, dig_cmd)
    for expected_value in expected:
        assert expected_value in str(dig_output)


def check_connectivity_between_workloads(p_client1, workload1, p_client2,
                                         workload2, allow_connectivity=True):
    wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
    wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
    for pod in wl1_pods:
        for o_pod in wl2_pods:
            check_connectivity_between_pods(pod, o_pod, allow_connectivity)


def check_connectivity_between_workload_pods(p_client, workload):
    pods = p_client.list_pod(workloadId=workload.id).data
    for pod in pods:
        for o_pod in pods:
            check_connectivity_between_pods(pod, o_pod)


def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
    pod_ip = pod2.status.podIp
    cmd = "ping -c 1 -W 1 " + pod_ip
    response = kubectl_pod_exec(pod1, cmd)
    print("Actual ping Response from " + pod1.name + ":" + str(response))
    if allow_connectivity:
        assert pod_ip in str(response) and " 0% packet loss" in str(response)
    else:
        assert pod_ip in str(response) and " 100% packet loss" \
            in str(response)
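
# Illustrative sketch (assumed flow): wiring a workload to an ingress rule
# and validating it the same way validate_cluster does below -- a random
# host header routed to the workload's /name.html responder on port 80.
def _example_ingress_roundtrip(p_client, cluster, ns, workload):
    host = "test" + str(random_int(10000, 99999)) + ".com"
    path = "/name.html"
    rule = {"host": host,
            "paths": [{"workloadIds": [workload.id], "targetPort": "80"}]}
    ingress = p_client.create_ingress(name=random_test_name("ingress"),
                                      namespaceId=ns.id,
                                      rules=[rule])
    wait_for_ingress_to_active(p_client, ingress)
    validate_ingress(p_client, cluster, [workload], host, path)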

def get_admin_client_and_cluster():
    client = get_admin_client()
    if CLUSTER_NAME == "":
        clusters = client.list_cluster().data
    else:
        clusters = client.list_cluster(name=CLUSTER_NAME).data
    assert len(clusters) > 0
    cluster = clusters[0]
    return client, cluster


def validate_cluster(client, cluster, intermediate_state="provisioning",
                     check_intermediate_state=True, skipIngresscheck=True,
                     nodes_not_in_active_state=[], k8s_version=""):
    cluster = validate_cluster_state(
        client, cluster,
        check_intermediate_state=check_intermediate_state,
        intermediate_state=intermediate_state,
        nodes_not_in_active_state=nodes_not_in_active_state)
    # Create a daemonSet workload and have an Ingress with a workload
    # rule pointing to this daemonSet
    create_kubeconfig(cluster)
    if k8s_version != "":
        check_cluster_version(cluster, k8s_version)
    if hasattr(cluster, 'rancherKubernetesEngineConfig'):
        check_cluster_state(len(get_role_nodes(cluster, "etcd")))
    project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
    p_client = get_project_client_for_token(project, ADMIN_TOKEN)
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    name = random_test_name("default")
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns.id,
                                        daemonSetConfig={})
    validate_workload(p_client, workload, "daemonSet", ns.name,
                      len(get_schedulable_nodes(cluster)))
    if not skipIngresscheck:
        host = "test" + str(random_int(10000, 99999)) + ".com"
        path = "/name.html"
        rule = {"host": host,
                "paths": [{"workloadIds": [workload.id],
                           "targetPort": "80"}]}
        ingress = p_client.create_ingress(name=name,
                                          namespaceId=ns.id,
                                          rules=[rule])
        wait_for_ingress_to_active(p_client, ingress)
        validate_ingress(p_client, cluster, [workload], host, path)
    return cluster


def check_cluster_version(cluster, version):
    cluster_k8s_version = \
        cluster.appliedSpec["rancherKubernetesEngineConfig"][
            "kubernetesVersion"]
    assert cluster_k8s_version == version, \
        "cluster_k8s_version: " + cluster_k8s_version + \
        " Expected: " + version
    expected_k8s_version = version[:version.find("-")]
    k8s_version = execute_kubectl_cmd("version")
    kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
    assert kubectl_k8s_version == expected_k8s_version, \
        "kubectl version: " + kubectl_k8s_version + \
        " Expected: " + expected_k8s_version


def check_cluster_state(etcd_count):
    css_resp = execute_kubectl_cmd("get cs")
    css = css_resp["items"]
    components = ["scheduler", "controller-manager"]
    for i in range(0, etcd_count):
        components.append("etcd-" + str(i))
    print("components to check - " + str(components))
    for cs in css:
        component_name = cs["metadata"]["name"]
        assert component_name in components
        components.remove(component_name)
        assert cs["conditions"][0]["status"] == "True"
        assert cs["conditions"][0]["type"] == "Healthy"
    assert len(components) == 0


def validate_cluster_state(client, cluster,
                           check_intermediate_state=True,
                           intermediate_state="provisioning",
                           nodes_not_in_active_state=[]):
    if check_intermediate_state:
        cluster = wait_for_condition(
            client, cluster,
            lambda x: x.state == intermediate_state,
            lambda x: 'State is: ' + x.state,
            timeout=MACHINE_TIMEOUT)
        assert cluster.state == intermediate_state
    cluster = wait_for_condition(
        client, cluster,
        lambda x: x.state == "active",
        lambda x: 'State is: ' + x.state,
        timeout=MACHINE_TIMEOUT)
    assert cluster.state == "active"
    wait_for_nodes_to_become_active(client, cluster,
                                    exception_list=nodes_not_in_active_state)
    return cluster


def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
                                    retry_count=0):
    nodes = client.list_node(clusterId=cluster.id).data
    node_auto_deleted = False
    for node in nodes:
        if node.requestedHostname not in exception_list:
            node = wait_for_node_status(client, node, "active")
            if node is None:
                print("Need to re-evaluate the new node list")
                node_auto_deleted = True
                retry_count += 1
                print("Retry Count:" + str(retry_count))
    if node_auto_deleted and retry_count < 5:
        wait_for_nodes_to_become_active(client, cluster, exception_list,
                                        retry_count)


def wait_for_node_status(client, node, state):
    uuid = node.uuid
    start = time.time()
    nodes = client.list_node(uuid=uuid).data
    node_count = len(nodes)
    # Handle the case of nodes getting auto deleted when they are
    # part of nodepools
    if node_count == 1:
        node_status = nodes[0].state
    else:
        print("Node does not exist anymore -" + uuid)
        return None
    while node_status != state:
        if time.time() - start > MACHINE_TIMEOUT:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(5)
        nodes = client.list_node(uuid=uuid).data
        node_count = len(nodes)
        if node_count == 1:
            node_status = nodes[0].state
        else:
            print("Node does not exist anymore -" + uuid)
            return None
    return node


def wait_for_node_to_be_deleted(client, node, timeout=300):
    uuid = node.uuid
    start = time.time()
    nodes = client.list_node(uuid=uuid).data
    node_count = len(nodes)
    while node_count != 0:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for node to get deleted")
        time.sleep(.5)
        nodes = client.list_node(uuid=uuid).data
        node_count = len(nodes)


def wait_for_cluster_node_count(client, cluster, expected_node_count,
                                timeout=300):
    start = time.time()
    nodes = client.list_node(clusterId=cluster.id).data
    node_count = len(nodes)
    while node_count != expected_node_count:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        nodes = client.list_node(clusterId=cluster.id).data
        node_count = len(nodes)


def create_custom_host_registration_token(client, cluster):
    cluster_token = client.create_cluster_registration_token(
        clusterId=cluster.id)
    cluster_token = client.wait_success(cluster_token)
    assert cluster_token.state == 'active'
    return cluster_token


def get_custom_host_registration_cmd(client, cluster, roles, node):
    allowed_roles = ["etcd", "worker", "controlplane"]
    cluster_tokens = client.list_cluster_registration_token(
        clusterId=cluster.id).data
    if len(cluster_tokens) > 0:
        cluster_token = cluster_tokens[0]
    else:
        cluster_token = create_custom_host_registration_token(client, cluster)
    cmd = cluster_token.nodeCommand
    for role in roles:
        assert role in allowed_roles
        cmd += " --" + role
    additional_options = " --address " + node.public_ip_address + \
                         " --internal-address " + node.private_ip_address
    cmd += additional_options
    return cmd


def get_cluster_type(client, cluster):
    cluster_configs = [
        "amazonElasticContainerServiceConfig",
        "azureKubernetesServiceConfig",
        "googleKubernetesEngineConfig",
        "rancherKubernetesEngineConfig"
    ]
    if "rancherKubernetesEngineConfig" in cluster:
        nodes = client.list_node(clusterId=cluster.id).data
        if len(nodes) > 0:
            if nodes[0].nodeTemplateId is None:
                return "Custom"
    for cluster_config in cluster_configs:
        if cluster_config in cluster:
            return cluster_config
    return "Imported"


def delete_cluster(client, cluster):
    nodes = client.list_node(clusterId=cluster.id).data
    # Delete the cluster
    client.delete(cluster)
    # Delete the nodes from AWS for Imported and Custom Cluster
    if (len(nodes) > 0):
        cluster_type = get_cluster_type(client, cluster)
        print(cluster_type)
        if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
            nodes = client.list_node(clusterId=cluster.id).data
            filters = [
                {'Name': 'tag:Name',
                 'Values': ['testcustom*', 'teststess*']}]
            ip_filter = {}
            ip_list = []
            ip_filter['Name'] = \
                'network-interface.addresses.association.public-ip'
            ip_filter['Values'] = ip_list
            filters.append(ip_filter)
            for node in nodes:
                ip_list.append(node.externalIpAddress)
            assert len(ip_filter) > 0
            print(ip_filter)
            aws_nodes = AmazonWebServices().get_nodes(filters)
            for node in aws_nodes:
                print(node.public_ip_address)
            AmazonWebServices().delete_nodes(aws_nodes)


def delete_node(aws_nodes):
    for node in aws_nodes:
        AmazonWebServices().delete_node(node)


def cluster_cleanup(client, cluster, aws_nodes=None):
    if RANCHER_CLEANUP_CLUSTER:
        client.delete(cluster)
        if aws_nodes is not None:
            delete_node(aws_nodes)
    else:
        env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
        env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
        env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
        create_config_file(env_details)


def create_config_file(env_details):
    file = open(env_file, "w")
    file.write(env_details)
    file.close()
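
# Illustrative sketch (assumption): registering a provisioned AWS instance
# into a custom cluster with the registration command built above.
# `aws_node.execute_command` is assumed to run a shell command on the
# instance, as provided by lib.aws.AmazonWebServices nodes.
def _example_register_custom_node(client, cluster, aws_node):
    cmd = get_custom_host_registration_cmd(
        client, cluster, ["etcd", "controlplane", "worker"], aws_node)
    aws_node.execute_command(cmd)
    return validate_cluster_state(client, cluster,
                                  check_intermediate_state=False)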

def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
    assert len(list) == 1
    pv = list[0]
    while pv.state != "available":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to available")
        time.sleep(.5)
        list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
        assert len(list) == 1
        pv = list[0]
    return pv


def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
    assert len(list) == 1
    pvc = list[0]
    while pvc.state != "bound":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to bound")
        time.sleep(.5)
        list = p_client.list_persistent_volume_claim(
            uuid=pvc_object.uuid).data
        assert len(list) == 1
        pvc = list[0]
    return pvc


def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
                       mount_path, sub_path, is_daemonSet=False):
    volumes = [{"type": "volume",
                "name": "vol1",
                "persistentVolumeClaim": {
                    "readOnly": "false",
                    "type": "persistentVolumeClaimVolumeSource",
                    "persistentVolumeClaimId": pvc_name
                }}]
    volumeMounts = [{"readOnly": "False",
                     "type": "volumeMount",
                     "mountPath": mount_path,
                     "subPath": sub_path,
                     "name": "vol1"
                     }]
    con = [{"name": "test1",
            "image": TEST_IMAGE,
            "volumeMounts": volumeMounts
            }]
    if is_daemonSet:
        workload = p_client.create_workload(name=wl_name,
                                            containers=con,
                                            namespaceId=ns_id,
                                            volumes=volumes,
                                            daemonSetConfig={})
    else:
        workload = p_client.create_workload(name=wl_name,
                                            containers=con,
                                            namespaceId=ns_id,
                                            volumes=volumes)
    return workload


def write_content_to_file(pod, content, filename):
    cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
    output = kubectl_pod_exec(pod, cmd_write)
    assert output.strip().decode('utf-8') == ""


def validate_file_content(pod, content, filename):
    cmd_get_content = "/bin/bash -c 'cat {0}'".format(filename)
    output = kubectl_pod_exec(pod, cmd_get_content)
    assert output.strip().decode('utf-8') == content
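
# Illustrative sketch (assumed flow): exercising an NFS-backed claim end to
# end -- mount it into a workload, write a marker file from the pod, then
# read it back through the exec helpers. `pvc_name` is assumed to refer to
# a claim that wait_for_pvc_to_be_bound has already verified.
def _example_nfs_write_read(p_client, ns, pvc_name):
    workload = create_wl_with_nfs(p_client, ns.id, pvc_name,
                                  random_test_name("nfs"),
                                  "/var/nfs", "sub")
    validate_workload(p_client, workload, "deployment", ns.name)
    pod = p_client.list_pod(workloadId=workload.id).data[0]
    write_content_to_file(pod, "hello", "/var/nfs/test.txt")
    validate_file_content(pod, "hello", "/var/nfs/test.txt")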
is", "= client.list_node(clusterId=cluster.id).data node_count = len(nodes) def get_custom_host_registration_cmd(client, cluster, roles, node):", "pv.state != \"available\": if time.time() - start > timeout: raise", "!= 0: if time.time() - start > timeout: raise AssertionError(", "= time.time() - start if delta > timeout: msg =", "in pods[\"items\"]: assert pod[\"spec\"][\"containers\"][0][\"image\"] == expectedimage def validate_pods_are_running_by_id(expectedpods, workload, ns_name):", "= p_client2.list_pod(workloadId=workload2.id).data for pod in wl1_pods: for o_pod in wl2_pods:", "p_client.list_pod(workloadId=workload.id).data for pod in pods: for o_pod in pods: check_connectivity_between_pods(pod,", "== pod_count for pod in pods_result[\"items\"]: assert pod[\"status\"][\"phase\"] == \"Running\"", "print(pod.nodeId + \" check \" + node.id) if pod.nodeId ==", "cluster, aws_nodes=None): if RANCHER_CLEANUP_CLUSTER: client.delete(cluster) if aws_nodes is not None:", "def wait_until_lb_is_active(url, timeout=300): start = time.time() while check_for_no_access(url): time.sleep(.5) print(\"No", "return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False) def get_client_for_token(token): return rancher.Client(url=CATTLE_API_URL, token=token, verify=False)", "client.reload(obj) except ApiError as e: if e.error.status != 403: raise", "ns = nss[0] while ns.state != \"active\": if time.time() -", "if aws_nodes is not None: delete_node(aws_nodes) else: env_details = \"env.CATTLE_TEST_URL='\"", "get_cluster_type(client, cluster): cluster_configs = [ \"amazonElasticContainerServiceConfig\", \"azureKubernetesServiceConfig\", \"googleKubernetesEngineConfig\", \"rancherKubernetesEngineConfig\" ]", "\"azureKubernetesServiceConfig\", \"googleKubernetesEngineConfig\", \"rancherKubernetesEngineConfig\" ] if \"rancherKubernetesEngineConfig\" in cluster: nodes =", "out waiting for LB to become active') return def check_for_no_access(url):", "c_client.list_persistent_volume(uuid=pv_object.uuid).data assert len(list) == 1 pv = list[0] while pv.state", "str(components)) for cs in css: component_name = cs[\"metadata\"][\"name\"] assert component_name", "cluster, workloads, host, path, insecure_redirect=False): time.sleep(10) curl_args = \" \"", "\"env.ADMIN_TOKEN='\" + ADMIN_TOKEN + \"'\\n\" env_details += \"env.CLUSTER_NAME='\" + cluster.name", "expected: assert expected_value in str(dig_output) def wait_for_nodes_to_become_active(client, cluster, exception_list=[], retry_count=0):", "pods = p_client.list_pod(workloadId=workload.id).data while len(pods) != pod_count: if time.time() -", "= [] client = get_admin_client() if CLUSTER_NAME == \"\" or", "assert \" 0% packet loss\" in str(ping_output) dig_cmd = 'dig", "else: result = run_command(command) if json_out: result = json.loads(result) print(result)", "cluster_configs = [ \"amazonElasticContainerServiceConfig\", \"azureKubernetesServiceConfig\", \"googleKubernetesEngineConfig\", \"rancherKubernetesEngineConfig\" ] if \"rancherKubernetesEngineConfig\"", "assert len(workloads) == 1 wl = workloads[0] while wl.state !=", "insecure_redirect=False): time.sleep(10) curl_args = \" \" if (insecure_redirect): curl_args =", "timeout: if timeout_message: raise Exception(timeout_message) else: raise Exception('Timeout waiting for", "ns_name): for key, value in workload.workloadLabels.items(): label = key +", "for o_pod in wl2_pods: check_connectivity_between_pods(pod, o_pod, allow_connectivity) def check_connectivity_between_workload_pods(p_client, 
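# Illustrative sketch (assumption, not part of the original helpers): granting
# a user access to a project and later promoting that membership. The role
# template ids used here are the Rancher built-ins; "user" is assumed to be a
# user object obtained elsewhere in the test.
def _example_project_membership(client, user, project):
    prtb = assign_members_to_project(client, user, project, "project-member")
    # later in the test, escalate the same binding to project-owner
    prtb = change_member_role_in_project(client, user, prtb, "project-owner")
    return prtb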
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
    wait_for(lambda: client.reload(obj).state == state, timeout)
    return client.reload(obj)


def wait_for_condition(client, resource, check_function, fail_handler=None,
                       timeout=DEFAULT_TIMEOUT):
    start = time.time()
    resource = client.reload(resource)
    while not check_function(resource):
        if time.time() - start > timeout:
            exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
                ' to satisfy condition: ' + \
                inspect.getsource(check_function)
            if fail_handler:
                exceptionMsg = exceptionMsg + fail_handler(resource)
            raise Exception(exceptionMsg)
        time.sleep(.5)
        resource = client.reload(resource)
    return resource


def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
    start = time.time()
    ret = callback()
    while ret is None or ret is False:
        time.sleep(.5)
        if time.time() - start > timeout:
            if timeout_message:
                raise Exception(timeout_message)
            else:
                raise Exception('Timeout waiting for condition')
        ret = callback()
    return ret


def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    sleep = 0.01
    while True:
        time.sleep(sleep)
        sleep *= 2
        if sleep > 2:
            sleep = 2
        try:
            obj = client.reload(obj)
        except ApiError as e:
            if e.error.status != 403:
                raise e
        else:
            return obj
        delta = time.time() - start
        if delta > timeout:
            msg = 'Timeout waiting for [{}:{}] for condition after {}' \
                ' seconds'.format(obj.type, obj.id, delta)
            raise Exception(msg)


def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    nss = client.list_namespace(uuid=ns.uuid).data
    assert len(nss) == 1
    ns = nss[0]
    while ns.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        nss = client.list_namespace(uuid=ns.uuid).data
        assert len(nss) == 1
        ns = nss[0]
    return ns


def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl


def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    pods = client.list_pod(uuid=pod.uuid).data
    assert len(pods) == 1
    p = pods[0]
    while p.state != "running":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        pods = client.list_pod(uuid=pod.uuid).data
        assert len(pods) == 1
        p = pods[0]
    return p


def wait_for_pods_in_workload(p_client, workload, pod_count,
                              timeout=DEFAULT_TIMEOUT):
    start = time.time()
    pods = p_client.list_pod(workloadId=workload.id).data
    while len(pods) != pod_count:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        pods = p_client.list_pod(workloadId=workload.id).data
    return pods
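# Illustrative sketch (assumption, not part of the original helpers): wait_for
# accepts any callable returning a truthy value, so ad-hoc conditions can be
# polled without writing a dedicated wait_* helper; timeout_message is
# optional.
def _example_wait_for_node_count(client, cluster, expected):
    wait_for(
        lambda: len(client.list_node(clusterId=cluster.id).data) == expected,
        timeout_message="node count never reached " + str(expected))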
def get_schedulable_nodes(cluster):
    client = get_admin_client()
    nodes = client.list_node(clusterId=cluster.id).data
    schedulable_nodes = []
    for node in nodes:
        if node.worker:
            schedulable_nodes.append(node)
    return schedulable_nodes


def get_role_nodes(cluster, role):
    etcd_nodes = []
    control_nodes = []
    worker_nodes = []
    node_list = []
    client = get_admin_client()
    nodes = client.list_node(clusterId=cluster.id).data
    for node in nodes:
        if node.etcd:
            etcd_nodes.append(node)
        if node.controlPlane:
            control_nodes.append(node)
        if node.worker:
            worker_nodes.append(node)
    if role == "etcd":
        node_list = etcd_nodes
    if role == "control":
        node_list = control_nodes
    if role == "worker":
        node_list = worker_nodes
    return node_list


def validate_workload(p_client, workload, type, ns_name, pod_count=1,
                      wait_for_cron_pods=60):
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    # For cronjob, wait for the first pod to get created after
    # scheduled wait time
    if type == "cronJob":
        time.sleep(wait_for_cron_pods)
    pods = p_client.list_pod(workloadId=workload.id).data
    assert len(pods) == pod_count
    for pod in pods:
        wait_for_pod_to_running(p_client, pod)
    wl_result = execute_kubectl_cmd(
        "get " + type + " " + workload.name + " -n " + ns_name)
    if type == "deployment" or type == "statefulSet":
        assert wl_result["status"]["readyReplicas"] == pod_count
    if type == "daemonSet":
        assert wl_result["status"]["currentNumberScheduled"] == pod_count
    if type == "cronJob":
        assert len(wl_result["status"]["active"]) >= pod_count
        return
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods_result = execute_kubectl_cmd(get_pods)
    assert len(pods_result["items"]) == pod_count
    for pod in pods_result["items"]:
        assert pod["status"]["phase"] == "Running"
    return pods_result["items"]


def validate_workload_image(client, workload, expectedImage, ns):
    workload = client.list_workload(uuid=workload.uuid).data[0]
    assert workload.containers[0].image == expectedImage
    validate_pod_images(expectedImage, workload, ns.name)


def validate_pod_images(expectedimage, workload, ns_name):
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    for pod in pods["items"]:
        assert pod["spec"]["containers"][0]["image"] == expectedimage


def execute_kubectl_cmd(cmd, json_out=True, stderr=False):
    command = 'kubectl --kubeconfig {0} {1}'.format(
        kube_fname, cmd)
    if json_out:
        command += ' -o json'
    if stderr:
        result = run_command_with_stderr(command)
    else:
        result = run_command(command)
    if json_out:
        result = json.loads(result)
    print(result)
    return result


def run_command(command):
    return subprocess.check_output(command, shell=True, text=True)


def run_command_with_stderr(command):
    try:
        output = subprocess.check_output(command, shell=True,
                                         stderr=subprocess.PIPE)
        returncode = 0
    except subprocess.CalledProcessError as e:
        output = e.output
        returncode = e.returncode
    print(returncode)
    return (output, returncode)


def kubectl_pod_exec(pod, cmd):
    command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
    return execute_kubectl_cmd(command, json_out=False, stderr=True)
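# Illustrative sketch (assumption, not part of the original helpers): the
# typical deploy-and-verify flow these utilities support. The container list
# mirrors the TEST_IMAGE convention used above; scale=2 and pod_count=2 are
# arbitrary example values and assume the workload type accepts a scale field.
def _example_deploy_and_validate(p_client, ns):
    con = [{"name": "test1", "image": TEST_IMAGE}]
    workload = p_client.create_workload(name=random_test_name("default"),
                                        containers=con,
                                        namespaceId=ns.id,
                                        scale=2)
    validate_workload(p_client, workload, "deployment", ns.name, pod_count=2)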
def get_target_names(p_client, workloads):
    pods = []
    for workload in workloads:
        pod_list = p_client.list_pod(workloadId=workload.id).data
        pods.extend(pod_list)
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod.name)
    print("target name list:" + str(target_name_list))
    return target_name_list


def validate_http_response(cmd, target_name_list, client_pod=None):
    target_hit_list = target_name_list[:]
    count = 5 * len(target_name_list)
    for i in range(1, count):
        if len(target_hit_list) == 0:
            break
        if client_pod is None:
            curl_cmd = "curl " + cmd
            result = run_command(curl_cmd)
        else:
            wget_cmd = "wget -qO- " + cmd
            result = kubectl_pod_exec(client_pod, wget_cmd)
            result = result.decode()
        result = result.rstrip()
        print("cmd: \t" + cmd)
        print("result: \t" + result)
        assert result in target_name_list
        if result in target_hit_list:
            target_hit_list.remove(result)
    print("After removing all, the rest is: ", target_hit_list)
    assert len(target_hit_list) == 0


def wait_until_lb_is_active(url, timeout=300):
    start = time.time()
    while check_for_no_access(url):
        time.sleep(.5)
        print("No access yet")
        if time.time() - start > timeout:
            raise Exception('Timed out waiting for LB to become active')
    return


def check_for_no_access(url):
    try:
        requests.get(url)
        return False
    except requests.ConnectionError:
        print("Connection Error - " + url)
        return True


def validate_ingress(p_client, cluster, workloads, host, path,
                     insecure_redirect=False):
    time.sleep(10)
    curl_args = " "
    if (insecure_redirect):
        curl_args = " -L --insecure "
    if len(host) > 0:
        curl_args += " --header 'Host: " + host + "'"
    nodes = get_schedulable_nodes(cluster)
    target_name_list = get_target_names(p_client, workloads)
    for node in nodes:
        host_ip = node.externalIpAddress
        cmd = curl_args + " http://" + host_ip + path
        validate_http_response(cmd, target_name_list)


def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    ingresses = client.list_ingress(uuid=ingress.uuid).data
    assert len(ingresses) == 1
    wl = ingresses[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        ingresses = client.list_ingress(uuid=ingress.uuid).data
        assert len(ingresses) == 1
        wl = ingresses[0]
    return wl


def check_connectivity_between_workload_pods(p_client, workload):
    pods = p_client.list_pod(workloadId=workload.id).data
    for pod in pods:
        for o_pod in pods:
            check_connectivity_between_pods(pod, o_pod)


def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
    pod_ip = pod2.status.podIp
    cmd = "ping -c 1 -W 1 " + pod_ip
    response = kubectl_pod_exec(pod1, cmd)
    print("Actual ping Response from " + pod1.name + ":" + str(response))
    if allow_connectivity:
        assert pod_ip in str(response) and " 0% packet loss" in str(response)
    else:
        assert pod_ip in str(response) and \
            " 100% packet loss" in str(response)


def validate_dns_record(pod, record, expected):
    # requires pod with `dig` available - TEST_IMAGE
    host = '{0}.{1}.svc.cluster.local'.format(
        record["name"], record["namespaceId"])
    validate_dns_entry(pod, host, expected)


def validate_dns_entry(pod, host, expected):
    # requires pod with `dig` available - TEST_IMAGE
    cmd = 'ping -c 1 -W 1 {0}'.format(host)
    ping_output = kubectl_pod_exec(pod, cmd)

    ping_validation_pass = False
    for expected_value in expected:
        if expected_value in str(ping_output):
            ping_validation_pass = True
            break

    assert ping_validation_pass is True
    assert " 0% packet loss" in str(ping_output)

    dig_cmd = 'dig {0} +short'.format(host)
    dig_output = kubectl_pod_exec(pod, dig_cmd)

    for expected_value in expected:
        assert expected_value in str(dig_output)
def check_cluster_version(cluster, version):
    cluster_k8s_version = \
        cluster.appliedSpec["rancherKubernetesEngineConfig"][
            "kubernetesVersion"]
    assert cluster_k8s_version == version, \
        "cluster_k8s_version: " + cluster_k8s_version + \
        " Expected: " + version
    expected_k8s_version = version[:version.find("-")]
    k8s_version = execute_kubectl_cmd("version")
    kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
    assert kubectl_k8s_version == expected_k8s_version, \
        "kubectl version: " + kubectl_k8s_version + \
        " Expected: " + expected_k8s_version


def check_cluster_state(etcd_count):
    css_resp = execute_kubectl_cmd("get cs")
    css = css_resp["items"]
    components = ["scheduler", "controller-manager"]
    for i in range(0, etcd_count):
        components.append("etcd-" + str(i))
    print("components to check - " + str(components))
    for cs in css:
        component_name = cs["metadata"]["name"]
        assert component_name in components
        components.remove(component_name)
        assert cs["conditions"][0]["status"] == "True"


def validate_cluster_state(client, cluster,
                           check_intermediate_state=True,
                           intermediate_state="provisioning",
                           nodes_not_in_active_state=[]):
    if check_intermediate_state:
        cluster = wait_for_condition(
            client, cluster,
            lambda x: x.state == intermediate_state,
            lambda x: 'State is: ' + x.state,
            timeout=MACHINE_TIMEOUT)
        assert cluster.state == intermediate_state
    cluster = wait_for_condition(
        client, cluster,
        lambda x: x.state == "active",
        lambda x: 'State is: ' + x.state,
        timeout=MACHINE_TIMEOUT)
    assert cluster.state == "active"
    wait_for_nodes_to_become_active(client, cluster,
                                    exception_list=nodes_not_in_active_state)
    return cluster


def validate_cluster(client, cluster, intermediate_state="provisioning",
                     check_intermediate_state=True, skipIngresscheck=True,
                     nodes_not_in_active_state=[], k8s_version=""):
    cluster = validate_cluster_state(
        client, cluster,
        check_intermediate_state=check_intermediate_state,
        intermediate_state=intermediate_state,
        nodes_not_in_active_state=nodes_not_in_active_state)
    # Create Daemon set workload and have an Ingress with Workload
    # rule pointing to this daemonset
    create_kubeconfig(cluster)
    if k8s_version != "":
        check_cluster_version(cluster, k8s_version)
    if hasattr(cluster, 'rancherKubernetesEngineConfig'):
        check_cluster_state(len(get_role_nodes(cluster, "etcd")))
    project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
    p_client = get_project_client_for_token(project, ADMIN_TOKEN)
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    name = random_test_name("default")
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns.id,
                                        daemonSetConfig={})
    validate_workload(p_client, workload, "daemonSet", ns.name,
                      len(get_schedulable_nodes(cluster)))
    if not skipIngresscheck:
        host = "test" + str(random_int(10000, 99999)) + ".com"
        path = "/name.html"
        rule = {"host": host,
                "paths": [{"workloadIds": [workload.id],
                           "targetPort": "80"}]}
        ingress = p_client.create_ingress(name=name,
                                          namespaceId=ns.id,
                                          rules=[rule])
        wait_for_ingress_to_active(p_client, ingress)
        validate_ingress(p_client, cluster, [workload], host, path)
    return cluster
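# Illustrative sketch (assumption, not part of the original helpers): how a
# provisioning test might drive validate_cluster once a cluster has been
# created through the API. Passing skipIngresscheck=False also exercises the
# daemonset-plus-ingress path shown above.
def _example_validate_new_cluster(client, cluster):
    cluster = validate_cluster(client, cluster,
                               intermediate_state="provisioning",
                               skipIngresscheck=False)
    return cluster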
def delete_node(aws_nodes):
    for node in aws_nodes:
        AmazonWebServices().delete_node(node)


def cluster_cleanup(client, cluster, aws_nodes=None):
    if RANCHER_CLEANUP_CLUSTER:
        client.delete(cluster)
        if aws_nodes is not None:
            delete_node(aws_nodes)
    else:
        env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
        env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
        env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
        create_config_file(env_details)


def create_config_file(env_details):
    file = open(env_file, "w")
    file.write(env_details)
    file.close()


def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
    assert len(list) == 1
    pvc = list[0]
    while pvc.state != "bound":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to bound")
        time.sleep(.5)
        list = p_client.list_persistent_volume_claim(
            uuid=pvc_object.uuid).data
        assert len(list) == 1
        pvc = list[0]
    return pvc


def write_content_to_file(pod, content, filename):
    cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
    output = kubectl_pod_exec(pod, cmd_write)
    assert output.strip().decode('utf-8') == ""


def validate_file_content(pod, content, filename):
    cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
    output = kubectl_pod_exec(pod, cmd_get_content)
    assert output.strip().decode('utf-8') == content


def wait_for_mcapp_to_active(client, multiClusterApp,
                             timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    print("\nuuid:")
    print(multiClusterApp.uuid)
    time.sleep(5)
    mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid,
                                         name=multiClusterApp.name).data
    start = time.time()
    assert len(mcapps) == 1
    mapp = mcapps[0]
    print(mapp.state)
    while mapp.state != "active":
        print(mapp.uuid)
        print(mapp.state)
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        multiclusterapps = client.list_multiClusterApp(
            uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
        assert len(multiclusterapps) == 1
        mapp = multiclusterapps[0]
    return mapp


def get_admin_client_and_cluster_mcapp():
    clusters = []
    client = get_admin_client()
    if CLUSTER_NAME == "" or CLUSTER_NAME_2 == "":
        clusters = client.list_cluster().data
    else:
        clusters.append(client.list_cluster(name=CLUSTER_NAME).data)
        clusters.append(client.list_cluster(name=CLUSTER_NAME_2).data)
    assert len(clusters) == 2
    return client, clusters
"validate_mcapp_cluster(app_id, p_client): mcapp = p_client.list_app(name=app_id).data assert len(mcapp) == 1 app", "waiting for state to get to bound\") time.sleep(.5) list =", "workload = client.list_workload(uuid=workload.uuid).data[0] assert workload.containers[0].image == expectedImage validate_pod_images(expectedImage, workload, ns.name)", "+= \" --\" + role additional_options = \" --address \"", "== 0: break if client_pod is None: curl_cmd = \"curl", "workload1, p_client2, workload2, allow_connectivity=True): wl1_pods = p_client1.list_pod(workloadId=workload1.id).data wl2_pods = p_client2.list_pod(workloadId=workload2.id).data", "+ workload.name + \" -n \" + ns_name) if type", "pods: check_connectivity_between_pods(pod, o_pod) def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True): pod_ip = pod2.status.podIp", "client = get_client_for_token(token) p = create_project(client, cluster, project_name) c_client =", "target_name_list.append(pod.name) break host_ip = node.externalIpAddress curl_cmd = \" http://\" +", "anymore -\" + uuid) return None return node def wait_for_node_to_be_deleted(client,", "assert \"running\" in pod[\"status\"][\"containerStatuses\"][0][\"state\"] assert \"running\" in pod[\"status\"][\"containerStatuses\"][1][\"state\"] def validate_workload_paused(p_client,", "else: return obj delta = time.time() - start if delta", "in pods: target_name_list.append(pod.name) print(\"target name list:\" + str(target_name_list)) return target_name_list", "!= expectedimage: if time.time() - start > timeout: raise AssertionError(", "\" --\" + role additional_options = \" --address \" +", "start = time.time() time.sleep(2) list = c_client.list_persistent_volume(uuid=pv_object.uuid).data assert len(list) ==", "cmd) print(\"Actual ping Response from \" + pod1.name + \":\"", "pods_result[\"items\"] def validate_workload_with_sidekicks(p_client, workload, type, ns_name, pod_count=1): workload = wait_for_wl_to_active(p_client,", "== 'active' return project def create_ns(client, cluster, project, ns_name=None): if", "= create_ns(c_client, cluster, p, ns_name) return p, ns def create_project(client,", "projectId=project.id, roleTemplateId=role_template_id, subjectKind=\"User\", userId=user.id) return prtb def change_member_role_in_cluster(client, user, crtb,", "pod.namespaceId + \" -- \" + cmd return execute_kubectl_cmd(command, json_out=False,", "if delta > timeout: msg = 'Timeout waiting for [{}:{}]", "target_name_list) def validate_lb(p_client, workload): url = get_endpoint_url_for_workload(p_client, workload) target_name_list =", "= \"\" def random_str(): return 'random-{0}-{1}'.format(random_num(), int(time.time())) def random_num(): return", "{} ip_list = [] ip_filter['Name'] = \\ 'network-interface.addresses.association.public-ip' ip_filter['Values'] =", "= nss[0] return ns def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,", "= [] for pod in pods[\"items\"]: curpodnames.append(pod[\"metadata\"][\"name\"]) for expectedpod in", "workload, type, ns_name, pod_count=1): workload = wait_for_wl_to_active(p_client, workload) assert workload.state", "-c 1 -W 1 {0}'.format(host) ping_output = kubectl_pod_exec(pod, cmd) ping_validation_pass", "role in roles: assert role in allowed_roles cmd += \"", "\"active\": if time.time() - start > timeout: raise AssertionError( \"Timed", "kubectl_k8s_version == expected_k8s_version, \\ \"kubectl version: \" + kubectl_k8s_version +", "def write_content_to_file(pod, content, filename): cmd_write = \"/bin/bash 
-c 'echo {1}", "count): if len(target_hit_list) == 0: break if client_pod is None:", "node = wait_for_node_status(client, node, \"active\") if node is None: print(\"Need", "assert len(pods) == 1 p = pods[0] return p def", "== expectedImage validate_pod_images(expectedImage, workload, ns.name) def execute_kubectl_cmd(cmd, json_out=True, stderr=False): command", "= client.list_node(clusterId=cluster.id).data filters = [ {'Name': 'tag:Name', 'Values': ['testcustom*', 'teststess*']}]", "clusters.append(client.list_cluster(name=CLUSTER_NAME_2).data) assert len(clusters) == 2 return client, clusters def validate_multi_cluster_app_cluster(app_id1,", "len(components) == 0 def validate_dns_record(pod, record, expected): # requires pod", "= client.create_project_role_template_binding( projectId=project.id, roleTemplateId=role_template_id, subjectKind=\"User\", userId=user.id) return prtb def change_member_role_in_cluster(client,", "Cluster client.delete(cluster) # Delete nodes(in cluster) from AWS for Imported", "= True retry_count += 1 print(\"Retry Count:\" + str(retry_count)) if", "= time.time() pods = p_client.list_pod(workloadId=workload.id).data while len(pods) != pod_count: if", "ingresses[0] return wl def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT, state=\"error\"): start =", "for key, value in workload.workloadLabels.items(): label = key + \"=\"", "pod_count) assert len(pods) == pod_count for pod in pods: wait_for_pod_to_running(p_client,", "1: node_status = nodes[0].state else: print(\"Node does not exist anymore", "kubectl_pod_exec(pod1, cmd) print(\"Actual ping Response from \" + pod1.name +", "= get_project_client_for_token(project, ADMIN_TOKEN) con = [{\"name\": \"test1\", \"image\": TEST_IMAGE}] name", "\" + ns_name pods = execute_kubectl_cmd(get_pods) curpodnames = [] for", "False for node in nodes: if node.requestedHostname not in exception_list:", "len(nodes) > 0: if nodes[0].nodeTemplateId is None: return \"Custom\" for", "cmd return execute_kubectl_cmd(command, json_out=False, stderr=True) def exec_shell_command(ip, port, cmd, password):", "return False except requests.ConnectionError: print(\"Connection Error - \" + url)", "return True def validate_http_response(cmd, target_name_list, client_pod=None): target_hit_list = target_name_list[:] count", "in range(0, etcd_count): components.append(\"etcd-\" + str(i)) print(\"components to check -", "ingress_list = p_client.list_ingress(uuid=ingress.uuid).data assert len(ingress_list) == 1 ingress = ingress_list[0]", "--internal-address \" + node.private_ip_address cmd += additional_options return cmd def", "cluster, exception_list=nodes_not_in_active_state) return cluster def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT): start =", "return pods_result[\"items\"] def validate_workload_with_sidekicks(p_client, workload, type, ns_name, pod_count=1): workload =", "= c_client.list_persistent_volume(uuid=pv_object.uuid).data assert len(list) == 1 pv = list[0] while", "host = '{0}.{1}.svc.cluster.local'.format( record[\"name\"], record[\"namespaceId\"]) validate_dns_entry(pod, host, expected) def validate_dns_entry(pod,", "client.reload(ns) assert ns.state == 'active' return ns def assign_members_to_cluster(client, user,", "key + \"=\" + value get_pods = \"get pods -l\"", "get_target_names(p_client, workloads): pods = [] for workload in workloads: pod_list", "while wl.state != \"active\": if time.time() - start > timeout:", "ns_name=None): client = get_client_for_token(token) p = create_project(client, 
cluster, project_name) c_client", "test_pods: validate_http_response(curl_cmd, target_name_list, pod) def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT): start =", "public_endpoint.keys(): url += public_endpoint[\"path\"] time.sleep(10) validate_http_response(url, target_name_list) def get_target_names(p_client, workloads):", "= time.time() assert len(mcapps) == 1 mapp = mcapps[0] print(mapp.state)", "for state to get to available\") time.sleep(.5) list = c_client.list_persistent_volume(uuid=pv_object.uuid).data", "\" + pod_ip response = kubectl_pod_exec(pod1, cmd) print(\"Actual ping Response", "\"/bin/bash -c 'echo {1} > {0}'\".format(filename, content) output = kubectl_pod_exec(pod,", "+ cmd result = kubectl_pod_exec(client_pod, wget_cmd) result = result.decode() result", "for node in nodes: host_ip = node.externalIpAddress curl_cmd = \"", "sleep > 2: sleep = 2 try: obj = client.reload(obj)", "scheduled wait time if type == \"cronJob\": time.sleep(wait_for_cron_pods) pods =", "+ result) assert result in target_name_list if result in target_hit_list:", "version, \\ \"cluster_k8s_version: \" + cluster_k8s_version + \\ \" Expected:", "= p_client.list_pod(workloadId=workload.id).data nodes = get_schedulable_nodes(cluster) for node in nodes: target_name_list", "for state to get to active\") time.sleep(.5) pods = p_client.list_pod(workloadId=workload.id).data", "if type == \"cronJob\": time.sleep(wait_for_cron_pods) pods = p_client.list_pod(workloadId=workload.id).data assert len(pods)", "etcd_nodes = [] control_nodes = [] worker_nodes = [] node_list", "get to active\") time.sleep(.5) nss = client.list_namespace(uuid=ns.uuid).data assert len(nss) ==", "state): uuid = node.uuid start = time.time() nodes = client.list_node(uuid=uuid).data", "time.sleep(wait_for_cron_pods) pods = p_client.list_pod(workloadId=workload.id).data assert len(pods) == pod_count for pod", "\"targetPort\": \"80\"}]} ingress = p_client.create_ingress(name=name, namespaceId=ns.id, rules=[rule]) wait_for_ingress_to_active(p_client, ingress) validate_ingress(p_client,", "wait_until_available(client, project) assert project.state == 'active' return project def create_ns(client,", "== 1 ns = nss[0] return ns def wait_for_pod_images(p_client, workload,", "+ value get_pods = \"get pods -l\" + label +", "projectId=project.id) wait_for_ns_to_become_active(client, ns) ns = client.reload(ns) assert ns.state == 'active'", "target_name_list = [] for pod in pods: target_name_list.append(pod.name) print(\"target name", "workload.state == \"active\" # For cronjob, wait for the first", "' + x.state, timeout=MACHINE_TIMEOUT) assert cluster.state == \"active\" wait_for_nodes_to_become_active(client, cluster,", "workloadStatus == expectedstatus def validate_pod_images(expectedimage, workload, ns_name): for key, value", "= workloads[0] return wl def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT): start =", "+ str(random_int(10000, 99999)) def get_admin_client(): return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False) def", "start > timeout: raise AssertionError( \"Timed out waiting for endpoint", "TEST_IMAGE cmd = 'ping -c 1 -W 1 {0}'.format(host) ping_output", "is None: return \"Custom\" for cluster_config in cluster_configs: if cluster_config", "\"volumeMounts\": volumeMounts }] if is_daemonSet: workload = p_client.create_workload(name=wl_name, containers=con, namespaceId=ns_id,", "-l\" + label + \" -n \" + ns_name pods", "== \"control\": node_list = control_nodes if role == 
\"worker\": node_list", "workload, ns.name) def execute_kubectl_cmd(cmd, json_out=True, stderr=False): command = 'kubectl --kubeconfig", "time.sleep(.5) list = c_client.list_persistent_volume(uuid=pv_object.uuid).data assert len(list) == 1 pv =", "con = [{\"name\": \"test1\", \"image\": TEST_IMAGE}] name = random_test_name(\"default\") workload", "if fail_handler: exceptionMsg = exceptionMsg + fail_handler(resource) raise Exception(exceptionMsg) time.sleep(.5)", "node list\") node_auto_deleted = True retry_count += 1 print(\"Retry Count:\"", "get_admin_client_and_cluster(): client = get_admin_client() if CLUSTER_NAME == \"\": clusters =", "== \"active\" # For cronjob, wait for the first pod", "url = \"http://\" url = url + workload.publicEndpoints[0][\"addresses\"][0] + \":\"", "def check_connectivity_between_workload_pods(p_client, workload): pods = p_client.list_pod(workloadId=workload.id).data for pod in pods:", "[\"etcd\", \"worker\", \"controlplane\"] cluster_tokens = client.list_cluster_registration_token( clusterId=cluster.id).data if len(cluster_tokens) >", "create_config_file(env_details): file = open(env_file, \"w\") file.write(env_details) file.close() def validate_hostPort(p_client, workload,", "= False url = None while not fqdn_available: if time.time()", "+ pod_ip response = kubectl_pod_exec(pod1, cmd) print(\"Actual ping Response from", "time.time() - start > timeout: if timeout_message: raise Exception(timeout_message) else:", "client.delete(cluster) # Delete nodes(in cluster) from AWS for Imported and", "check_intermediate_state=check_intermediate_state, intermediate_state=intermediate_state, nodes_not_in_active_state=nodes_not_in_active_state) # Create Daemon set workload and have", "= wait_until_available(client, p) assert p.state == 'active' return p def", "to active\") time.sleep(.5) nodes = client.list_node(uuid=uuid).data node_count = len(nodes) def", "return \"Custom\" for cluster_config in cluster_configs: if cluster_config in cluster:", "start = time.time() while not fqdn_available: if time.time() - start", "ns_name=None): if ns_name is None: ns_name = random_name() ns =", "out waiting for correct pod images\") time.sleep(.5) pods = execute_kubectl_cmd(get_pods)", "= p_client.create_workload(name=wl_name, containers=con, namespaceId=ns_id, volumes=volumes, daemonSetConfig={}) else: workload = p_client.create_workload(name=wl_name,", "\"True\")) env_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), \"rancher_env.config\") CLUSTER_NAME_2 = \"\" def", "\"-\" + str(random_int(10000, 99999)) def get_admin_client(): return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)", "\"\" or CLUSTER_NAME_2 == \"\": clusters = client.list_cluster().data else: clusters.append(client.list_cluster(name=CLUSTER_NAME).data)", "str(response) and \" 0% packet loss\" in str(response) else: assert", "cluster_token = cluster_tokens[0] else: cluster_token = create_custom_host_registration_token(client, cluster) cmd =", "target_name_list = get_target_names(p_client, workloads) start = time.time() fqdn_available = False", "component_name = cs[\"metadata\"][\"name\"] assert component_name in components components.remove(component_name) assert cs[\"conditions\"][0][\"status\"]", "out waiting for state to get to active\") time.sleep(5) nodes", "execute_kubectl_cmd(cmd, json_out=True, stderr=False): command = 'kubectl --kubeconfig {0} {1}'.format( kube_fname,", "+ cmd return execute_kubectl_cmd(command, json_out=False, stderr=True) def exec_shell_command(ip, 
port, cmd,", "get_custom_host_registration_cmd(client, cluster, roles, node): allowed_roles = [\"etcd\", \"worker\", \"controlplane\"] cluster_tokens", "len(workload.publicEndpoints) > 0 url = \"http://\" url = url +", "!= state: if time.time() - start > MACHINE_TIMEOUT: raise AssertionError(", "= k8s_version[\"serverVersion\"][\"gitVersion\"] assert kubectl_k8s_version == expected_k8s_version, \\ \"kubectl version: \"", "time.sleep(.5) pods = execute_kubectl_cmd(get_pods) pod = pods[\"items\"][x] podimage = pod[\"spec\"][\"containers\"][0][\"image\"]", "= [] for pod in pods: print(pod.nodeId + \" check", "+= \"env.ADMIN_TOKEN='\" + ADMIN_TOKEN + \"'\\n\" env_details += \"env.CLUSTER_NAME='\" +", "execute_kubectl_cmd(get_pods) curpodnames = [] for pod in pods[\"items\"]: curpodnames.append(pod[\"metadata\"][\"name\"]) for", "pvc = list[0] return pvc def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,", "# requires pod with `dig` available - TEST_IMAGE host =", "allow_connectivity: assert pod_ip in str(response) and \" 0% packet loss\"", "wait_for_mcapp_cluster_level_to_active(client, app_id, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT): mcapps = client.list_app(name=app_id).data start = time.time() assert", "pods = wait_for_pods_in_workload(p_client, workload, pod_count) assert len(pods) == pod_count for", "check_for_no_access(url): time.sleep(.5) print(\"No access yet\") if time.time() - start >", "return p_client def get_cluster_client_for_token(cluster, token): c_url = cluster.links['self'] + '/schemas'", "= client.create_project(name=random_name(), clusterId=cluster.id) p = wait_until_available(client, p) assert p.state ==", "2: sleep = 2 try: obj = client.reload(obj) except ApiError", "pods = p_client.list_pod(workloadId=workload.id).data assert len(pods) == pod_count for pod in", "in pods: print(pod.nodeId + \" check \" + node.id) if", "time.time() nodes = client.list_node(clusterId=cluster.id).data node_count = len(nodes) while node_count !=", "wl1_pods: for o_pod in wl2_pods: check_connectivity_between_pods(pod, o_pod, allow_connectivity) def check_connectivity_between_workload_pods(p_client,", "= os.path.join(os.path.dirname(os.path.realpath(__file__)), \"k8s_kube_config\") MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', \"1200\")) TEST_IMAGE = \"sangeetha/mytestcontainer\"", "len(workloads) == 1 wl = workloads[0] return wl def wait_for_pod_to_running(client,", "to be available\") time.sleep(.5) ingress_list = p_client.list_ingress(uuid=ingress.uuid).data assert len(ingress_list) ==", "target_name_list if result in target_hit_list: target_hit_list.remove(result) print(\"After removing all, the", "str(random_int(10000, 99999)) def get_admin_client(): return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False) def get_client_for_token(token):", "if node.controlPlane: control_nodes.append(node) if node.worker: worker_nodes.append(node) if role == \"etcd\":", "\"\": check_cluster_version(cluster, k8s_version) if hasattr(cluster, 'rancherKubernetesEngineConfig'): check_cluster_state(len(get_role_nodes(cluster, \"etcd\"))) project, ns", "schedulable_nodes = [] for node in nodes: if node.worker: schedulable_nodes.append(node)", "pv = list[0] return pv def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT): start", "expectedimage def validate_pods_are_running_by_id(expectedpods, workload, ns_name): for key, value in workload.workloadLabels.items():", "if node.worker: schedulable_nodes.append(node) return schedulable_nodes def 
get_role_nodes(cluster, role): etcd_nodes =", "version: \" + kubectl_k8s_version + \\ \" Expected: \" +", "= False for expected_value in expected: if expected_value in str(ping_output):", "k8s_version[\"serverVersion\"][\"gitVersion\"] assert kubectl_k8s_version == expected_k8s_version, \\ \"kubectl version: \" +", "wait_for_condition(client, resource, check_function, fail_handler=None, timeout=DEFAULT_TIMEOUT): start = time.time() resource =", "node_count == 1: node_status = nodes[0].state else: print(\"Node does not", "return name + \"-\" + str(random_int(10000, 99999)) def get_admin_client(): return", "wl1_pods = p_client1.list_pod(workloadId=workload1.id).data wl2_pods = p_client2.list_pod(workloadId=workload2.id).data for pod in wl1_pods:", "= client.list_cluster().data else: clusters = client.list_cluster(name=CLUSTER_NAME).data assert len(clusters) > 0", "= result.decode() result = result.rstrip() print(\"cmd: \\t\" + cmd) print(\"result:", "= \" http://\" + host_ip + \":\" + \\ str(source_port)", "prtb = client.update( prtb, roleTemplateId=role_template_id, userId=user.id) return prtb def create_kubeconfig(cluster):", "namespaceId=ns.id, rules=[rule]) wait_for_ingress_to_active(p_client, ingress) validate_ingress(p_client, cluster, [workload], host, path) return", "= \"exec \" + pod.name + \" -n \" +", "assert ping_validation_pass is True assert \" 0% packet loss\" in", "[workload.id], \"targetPort\": \"80\"}]} ingress = p_client.create_ingress(name=name, namespaceId=ns.id, rules=[rule]) wait_for_ingress_to_active(p_client, ingress)", "if \"path\" in public_endpoint.keys(): url += public_endpoint[\"path\"] time.sleep(10) validate_http_response(url, target_name_list)", "p_url = project.links['self'] + '/schemas' p_client = rancher.Client(url=p_url, token=token, verify=False)", "import requests import ast import paramiko import rancher from rancher", "clusterId=cluster.id, projectId=project.id) wait_for_ns_to_become_active(client, ns) ns = client.reload(ns) assert ns.state ==", "client.list_node(clusterId=cluster.id).data for node in nodes: if node.etcd: etcd_nodes.append(node) if node.controlPlane:", "curl_args + \" http://\" + host_ip + path validate_http_response(cmd, target_name_list)", "client.list_node(clusterId=cluster.id).data node_count = len(nodes) while node_count != expected_node_count: if time.time()", "execute_kubectl_cmd(get_pods) pods_result = execute_kubectl_cmd(get_pods) assert len(pods_result[\"items\"]) == pod_count for pod", "assert len(apps) == 1 mapp = apps[0] return mapp def", "= False url = \"\" start = time.time() while not", "str(retry_count)) if node_auto_deleted and retry_count < 5: wait_for_nodes_to_become_active(client, cluster, exception_list,", "== version, \\ \"cluster_k8s_version: \" + cluster_k8s_version + \\ \"", "of nodes getting auto deleted when they are part of", "state, timeout) return client.reload(obj) def wait_for_condition(client, resource, check_function, fail_handler=None, timeout=DEFAULT_TIMEOUT):", "pods = execute_kubectl_cmd(get_pods) for pod in pods[\"items\"]: assert pod[\"spec\"][\"containers\"][0][\"image\"] ==", "= [ \"amazonElasticContainerServiceConfig\", \"azureKubernetesServiceConfig\", \"googleKubernetesEngineConfig\", \"rancherKubernetesEngineConfig\" ] if \"rancherKubernetesEngineConfig\" in", "cmd = \"ping -c 1 -W 1 \" + pod_ip", "{1}'.format( kube_fname, cmd) if json_out: command += ' -o json'", "== \"True\" assert cs[\"conditions\"][0][\"type\"] == \"Healthy\" assert len(components) == 0", "== \"worker\": node_list = 
worker_nodes return node_list def validate_ingress(p_client, cluster,", "created after # scheduled wait time if type == \"cronJob\":" ]
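# Usage sketch (hypothetical test, not part of the original module): it wires
# the helpers above together against a live Rancher server reachable through
# CATTLE_TEST_URL/ADMIN_TOKEN; the create_workload call mirrors how the rest
# of this suite deploys TEST_IMAGE.
def test_workload_comes_up():
    client, cluster = get_admin_client_and_cluster()
    project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
    p_client = get_project_client_for_token(project, ADMIN_TOKEN)
    con = [{"name": "test1", "image": TEST_IMAGE}]
    workload = p_client.create_workload(
        name=random_test_name("default"), containers=con, namespaceId=ns.id)
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    cluster_cleanup(client, cluster)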
[ "determine if the input string is valid. # An input", "brackets. # Open brackets must be closed in the correct", "elif ch in dict.keys(): if len(stack) == 0 or (stack.pop()", "main(): s = Solution() print(s.isValid(\"()\")) print(s.isValid(\"()[]{}\")) print(s.isValid(\"(]\")) print(s.isValid(\"([)]\")) print(s.isValid(\"{[]}\")) if", "4: # Input: \"([)]\" # Output: false # Example 5:", "\"([)]\" # Output: false # Example 5: # Input: \"{[]}\"", "Example 2: # Input: \"()[]{}\" # Output: true # Example", "an empty string is also considered valid. # Example 1:", "false # Example 5: # Input: \"{[]}\" # Output: true", "ch in s: if ch in dict.values(): stack.append(ch) elif ch", "by the same type of brackets. # Open brackets must", "string is also considered valid. # Example 1: # Input:", "valid. # Example 1: # Input: \"()\" # Output: true", "# Output: false # Example 4: # Input: \"([)]\" #", "print(s.isValid(\"()\")) print(s.isValid(\"()[]{}\")) print(s.isValid(\"(]\")) print(s.isValid(\"([)]\")) print(s.isValid(\"{[]}\")) if __name__ == \"__main__\": main()", "in the correct order. # Note that an empty string", "the correct order. # Note that an empty string is", "# Input: \"()\" # Output: true # Example 2: #", "\"()\" # Output: true # Example 2: # Input: \"()[]{}\"", "'[' and ']', # determine if the input string is", "# Input: \"(]\" # Output: false # Example 4: #", "if len(stack) == 0 or (stack.pop() != dict[ch]): return False", "Example 1: # Input: \"()\" # Output: true # Example", "type of brackets. # Open brackets must be closed in", "dict.keys(): if len(stack) == 0 or (stack.pop() != dict[ch]): return", "considered valid. # Example 1: # Input: \"()\" # Output:", "# Output: true # Example 2: # Input: \"()[]{}\" #", "of brackets. # Open brackets must be closed in the", "\"\"\" :type s: str :rtype: bool \"\"\" dict = {')':'(',']':'[','}':'{'}", "Input: \"()[]{}\" # Output: true # Example 3: # Input:", "3: # Input: \"(]\" # Output: false # Example 4:", "dict = {')':'(',']':'[','}':'{'} stack = [] for ch in s:", "')', '{', '}', '[' and ']', # determine if the", "# Example 4: # Input: \"([)]\" # Output: false #", "be closed by the same type of brackets. # Open", "Input: \"(]\" # Output: false # Example 4: # Input:", "\"{[]}\" # Output: true class Solution(object): def isValid(self, s): \"\"\"", "order. # Note that an empty string is also considered", "true # Example 3: # Input: \"(]\" # Output: false", "# Example 1: # Input: \"()\" # Output: true #", "Example 3: # Input: \"(]\" # Output: false # Example", "in dict.values(): stack.append(ch) elif ch in dict.keys(): if len(stack) ==", "Input: \"()\" # Output: true # Example 2: # Input:", "str :rtype: bool \"\"\" dict = {')':'(',']':'[','}':'{'} stack = []", "is valid. # An input string is valid if: #", "just the characters '(', ')', '{', '}', '[' and ']',", "isValid(self, s): \"\"\" :type s: str :rtype: bool \"\"\" dict", "0 or (stack.pop() != dict[ch]): return False return len(stack) ==", "']', # determine if the input string is valid. #", "dict[ch]): return False return len(stack) == 0 def main(): s", "'(', ')', '{', '}', '[' and ']', # determine if", "# determine if the input string is valid. # An", "Output: true class Solution(object): def isValid(self, s): \"\"\" :type s:", "# Output: false # Example 5: # Input: \"{[]}\" #", ":rtype: bool \"\"\" dict = {')':'(',']':'[','}':'{'} stack = [] for", "and ']', # determine if the input string is valid.", "input string is valid if: # Open brackets must be", "the input string is valid. 
# An input string is", "# Open brackets must be closed in the correct order.", "'{', '}', '[' and ']', # determine if the input", "must be closed by the same type of brackets. #", "that an empty string is also considered valid. # Example", "input string is valid. # An input string is valid", "return False return len(stack) == 0 def main(): s =", "is also considered valid. # Example 1: # Input: \"()\"", "== 0 or (stack.pop() != dict[ch]): return False return len(stack)", "or (stack.pop() != dict[ch]): return False return len(stack) == 0", "{')':'(',']':'[','}':'{'} stack = [] for ch in s: if ch", "s: if ch in dict.values(): stack.append(ch) elif ch in dict.keys():", "[] for ch in s: if ch in dict.values(): stack.append(ch)", "\"(]\" # Output: false # Example 4: # Input: \"([)]\"", "closed by the same type of brackets. # Open brackets", "# Example 2: # Input: \"()[]{}\" # Output: true #", "# Output: true class Solution(object): def isValid(self, s): \"\"\" :type", "# Input: \"{[]}\" # Output: true class Solution(object): def isValid(self,", "Output: false # Example 5: # Input: \"{[]}\" # Output:", "# Given a string containing just the characters '(', ')',", "dict.values(): stack.append(ch) elif ch in dict.keys(): if len(stack) == 0", "# Output: true # Example 3: # Input: \"(]\" #", "ch in dict.keys(): if len(stack) == 0 or (stack.pop() !=", "be closed in the correct order. # Note that an", "s): \"\"\" :type s: str :rtype: bool \"\"\" dict =", "Note that an empty string is also considered valid. #", "= [] for ch in s: if ch in dict.values():", "\"\"\" dict = {')':'(',']':'[','}':'{'} stack = [] for ch in", "return len(stack) == 0 def main(): s = Solution() print(s.isValid(\"()\"))", "# Example 5: # Input: \"{[]}\" # Output: true class", "the same type of brackets. # Open brackets must be", "# Example 3: # Input: \"(]\" # Output: false #", "correct order. # Note that an empty string is also", "same type of brackets. # Open brackets must be closed", "empty string is also considered valid. # Example 1: #", "!= dict[ch]): return False return len(stack) == 0 def main():", "len(stack) == 0 or (stack.pop() != dict[ch]): return False return", "Input: \"{[]}\" # Output: true class Solution(object): def isValid(self, s):", "brackets must be closed in the correct order. # Note", "the characters '(', ')', '{', '}', '[' and ']', #", "brackets must be closed by the same type of brackets.", "# Input: \"()[]{}\" # Output: true # Example 3: #", "valid. 
# An input string is valid if: # Open", "(stack.pop() != dict[ch]): return False return len(stack) == 0 def", "containing just the characters '(', ')', '{', '}', '[' and", "Given a string containing just the characters '(', ')', '{',", "Example 5: # Input: \"{[]}\" # Output: true class Solution(object):", "s = Solution() print(s.isValid(\"()\")) print(s.isValid(\"()[]{}\")) print(s.isValid(\"(]\")) print(s.isValid(\"([)]\")) print(s.isValid(\"{[]}\")) if __name__", "true # Example 2: # Input: \"()[]{}\" # Output: true", "if ch in dict.values(): stack.append(ch) elif ch in dict.keys(): if", "stack = [] for ch in s: if ch in", "if: # Open brackets must be closed by the same", "An input string is valid if: # Open brackets must", "5: # Input: \"{[]}\" # Output: true class Solution(object): def", "true class Solution(object): def isValid(self, s): \"\"\" :type s: str", "in s: if ch in dict.values(): stack.append(ch) elif ch in", "0 def main(): s = Solution() print(s.isValid(\"()\")) print(s.isValid(\"()[]{}\")) print(s.isValid(\"(]\")) print(s.isValid(\"([)]\"))", "string is valid if: # Open brackets must be closed", "Open brackets must be closed by the same type of", ":type s: str :rtype: bool \"\"\" dict = {')':'(',']':'[','}':'{'} stack", "len(stack) == 0 def main(): s = Solution() print(s.isValid(\"()\")) print(s.isValid(\"()[]{}\"))", "Example 4: # Input: \"([)]\" # Output: false # Example", "must be closed in the correct order. # Note that", "2: # Input: \"()[]{}\" # Output: true # Example 3:", "\"()[]{}\" # Output: true # Example 3: # Input: \"(]\"", "bool \"\"\" dict = {')':'(',']':'[','}':'{'} stack = [] for ch", "= {')':'(',']':'[','}':'{'} stack = [] for ch in s: if", "Input: \"([)]\" # Output: false # Example 5: # Input:", "False return len(stack) == 0 def main(): s = Solution()", "closed in the correct order. # Note that an empty", "== 0 def main(): s = Solution() print(s.isValid(\"()\")) print(s.isValid(\"()[]{}\")) print(s.isValid(\"(]\"))", "is valid if: # Open brackets must be closed by", "false # Example 4: # Input: \"([)]\" # Output: false", "Solution(object): def isValid(self, s): \"\"\" :type s: str :rtype: bool", "valid if: # Open brackets must be closed by the", "in dict.keys(): if len(stack) == 0 or (stack.pop() != dict[ch]):", "Output: false # Example 4: # Input: \"([)]\" # Output:", "ch in dict.values(): stack.append(ch) elif ch in dict.keys(): if len(stack)", "# Open brackets must be closed by the same type", "Solution() print(s.isValid(\"()\")) print(s.isValid(\"()[]{}\")) print(s.isValid(\"(]\")) print(s.isValid(\"([)]\")) print(s.isValid(\"{[]}\")) if __name__ == \"__main__\":", "characters '(', ')', '{', '}', '[' and ']', # determine", "s: str :rtype: bool \"\"\" dict = {')':'(',']':'[','}':'{'} stack =", "also considered valid. # Example 1: # Input: \"()\" #", "def isValid(self, s): \"\"\" :type s: str :rtype: bool \"\"\"", "# An input string is valid if: # Open brackets", "Output: true # Example 3: # Input: \"(]\" # Output:", "Open brackets must be closed in the correct order. #", "'}', '[' and ']', # determine if the input string", "for ch in s: if ch in dict.values(): stack.append(ch) elif", "string containing just the characters '(', ')', '{', '}', '['", "1: # Input: \"()\" # Output: true # Example 2:", "Output: true # Example 2: # Input: \"()[]{}\" # Output:", "string is valid. 
# An input string is valid if:", "# Note that an empty string is also considered valid.", "def main(): s = Solution() print(s.isValid(\"()\")) print(s.isValid(\"()[]{}\")) print(s.isValid(\"(]\")) print(s.isValid(\"([)]\")) print(s.isValid(\"{[]}\"))", "= Solution() print(s.isValid(\"()\")) print(s.isValid(\"()[]{}\")) print(s.isValid(\"(]\")) print(s.isValid(\"([)]\")) print(s.isValid(\"{[]}\")) if __name__ ==", "if the input string is valid. # An input string", "stack.append(ch) elif ch in dict.keys(): if len(stack) == 0 or", "a string containing just the characters '(', ')', '{', '}',", "# Input: \"([)]\" # Output: false # Example 5: #", "class Solution(object): def isValid(self, s): \"\"\" :type s: str :rtype:" ]
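# The stack invariant behind isValid: every closer must match the most
# recently pushed opener, which is why "([)]" fails when ')' pops '['
# instead of '('. A few extra edge cases (hypothetical checks, not in the
# original file):
assert Solution().isValid("") is True     # empty string counts as valid
assert Solution().isValid("((") is False  # unclosed openers remain on the stack
assert Solution().isValid("]") is False   # closer arrives on an empty stack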
[ "response = ({\"status\": \"OK\", \"message\": \"Service is running\"}, 200) except", "\"Service is running\"} def get_database_health(token_info=None, user=None): \"\"\"Get a health report", "\"OK\", \"message\": \"Service is running\"} def get_database_health(token_info=None, user=None): \"\"\"Get a", "\"\"\" response = ({\"status\": \"Pending\", \"message\": \"Fetching service status\"}, 200)", "from sqlalchemy.exc import InterfaceError from sqlalchemy.exc import OperationalError try: from", "handle various responses try: if not IS_CONNECTED: response = ({\"status\":", "configuration\"}, 500) except Exception as e: response = ({\"status\": \"Error\",", "Exception as e: response = ({\"status\": \"Error\", \"message\": str(e)}, 500)", "def get_database_health(token_info=None, user=None): \"\"\"Get a health report for the database", "\"frontend\": current_app.config.get(\"FRONTEND_URL\", \"http://localhost:3000\"), \"backend\": current_app.config.get(\"BACKEND_URL\", \"http://localhost:8080\"), \"api_ui\": current_app.config.get(\"BACKEND_URL\", \"http://localhost:8080\") +", "InterfaceError from sqlalchemy.exc import OperationalError try: from ibutsu_server.db.model import Result", "health report for the database :rtype: Health \"\"\" response =", "\"Error\", \"message\": \"Unable to connect to the database\"}, 500) except", "responses try: if not IS_CONNECTED: response = ({\"status\": \"Error\", \"message\":", "a health report for the database :rtype: Health \"\"\" response", "and handle various responses try: if not IS_CONNECTED: response =", "try: if not IS_CONNECTED: response = ({\"status\": \"Error\", \"message\": \"Incomplete", "= ({\"status\": \"Error\", \"message\": \"Incomplete database configuration\"}, 500) else: Result.query.first()", ":rtype: Health \"\"\" return {\"status\": \"OK\", \"message\": \"Service is running\"}", "\"message\": \"Service is running\"} def get_database_health(token_info=None, user=None): \"\"\"Get a health", "return {\"status\": \"OK\", \"message\": \"Service is running\"} def get_database_health(token_info=None, user=None):", "= ({\"status\": \"Error\", \"message\": \"Unable to connect to the database\"},", "ibutsu_server.db.model import Result IS_CONNECTED = True except ImportError: IS_CONNECTED =", "\"Error\", \"message\": str(e)}, 500) return response def get_health_info(token_info=None, user=None): \"\"\"Get", "is running\"}, 200) except OperationalError: response = ({\"status\": \"Error\", \"message\":", "\"Service is running\"}, 200) except OperationalError: response = ({\"status\": \"Error\",", "\"OK\", \"message\": \"Service is running\"}, 200) except OperationalError: response =", "import current_app from sqlalchemy.exc import InterfaceError from sqlalchemy.exc import OperationalError", "service status\"}, 200) # Try to connect to the database,", "user=None): \"\"\"Get the information about this server :rtype: HealthInfo \"\"\"", "is running\"} def get_database_health(token_info=None, user=None): \"\"\"Get a health report for", "\"message\": str(e)}, 500) return response def get_health_info(token_info=None, user=None): \"\"\"Get the", "\"\"\"Get the information about this server :rtype: HealthInfo \"\"\" return", "import InterfaceError from sqlalchemy.exc import OperationalError try: from ibutsu_server.db.model import", "for the database :rtype: Health \"\"\" response = ({\"status\": \"Pending\",", "response = ({\"status\": \"Pending\", \"message\": \"Fetching service status\"}, 200) #", "\"message\": \"Unable to connect to the database\"}, 
500) except InterfaceError:", "the database\"}, 500) except InterfaceError: response = ({\"status\": \"Error\", \"message\":", "user=None): \"\"\"Get a health report :rtype: Health \"\"\" return {\"status\":", "({\"status\": \"Error\", \"message\": \"Incomplete database configuration\"}, 500) else: Result.query.first() response", "health report :rtype: Health \"\"\" return {\"status\": \"OK\", \"message\": \"Service", "the information about this server :rtype: HealthInfo \"\"\" return {", "information about this server :rtype: HealthInfo \"\"\" return { \"frontend\":", "Health \"\"\" response = ({\"status\": \"Pending\", \"message\": \"Fetching service status\"},", "except ImportError: IS_CONNECTED = False def get_health(token_info=None, user=None): \"\"\"Get a", "the database :rtype: Health \"\"\" response = ({\"status\": \"Pending\", \"message\":", "500) return response def get_health_info(token_info=None, user=None): \"\"\"Get the information about", "\"Error\", \"message\": \"Incomplete database configuration\"}, 500) else: Result.query.first() response =", "response = ({\"status\": \"Error\", \"message\": \"Incorrect connection configuration\"}, 500) except", "({\"status\": \"Pending\", \"message\": \"Fetching service status\"}, 200) # Try to", "connect to the database, and handle various responses try: if", "to the database\"}, 500) except InterfaceError: response = ({\"status\": \"Error\",", "running\"} def get_database_health(token_info=None, user=None): \"\"\"Get a health report for the", "Try to connect to the database, and handle various responses", "200) # Try to connect to the database, and handle", "this server :rtype: HealthInfo \"\"\" return { \"frontend\": current_app.config.get(\"FRONTEND_URL\", \"http://localhost:3000\"),", "\"\"\" return { \"frontend\": current_app.config.get(\"FRONTEND_URL\", \"http://localhost:3000\"), \"backend\": current_app.config.get(\"BACKEND_URL\", \"http://localhost:8080\"), \"api_ui\":", "str(e)}, 500) return response def get_health_info(token_info=None, user=None): \"\"\"Get the information", "connect to the database\"}, 500) except InterfaceError: response = ({\"status\":", "\"\"\"Get a health report for the database :rtype: Health \"\"\"", "sqlalchemy.exc import OperationalError try: from ibutsu_server.db.model import Result IS_CONNECTED =", "\"http://localhost:3000\"), \"backend\": current_app.config.get(\"BACKEND_URL\", \"http://localhost:8080\"), \"api_ui\": current_app.config.get(\"BACKEND_URL\", \"http://localhost:8080\") + \"/api/ui/\", }", "database configuration\"}, 500) else: Result.query.first() response = ({\"status\": \"OK\", \"message\":", "def get_health_info(token_info=None, user=None): \"\"\"Get the information about this server :rtype:", "IS_CONNECTED = False def get_health(token_info=None, user=None): \"\"\"Get a health report", "\"message\": \"Incomplete database configuration\"}, 500) else: Result.query.first() response = ({\"status\":", "get_health_info(token_info=None, user=None): \"\"\"Get the information about this server :rtype: HealthInfo", "various responses try: if not IS_CONNECTED: response = ({\"status\": \"Error\",", "InterfaceError: response = ({\"status\": \"Error\", \"message\": \"Incorrect connection configuration\"}, 500)", "def get_health(token_info=None, user=None): \"\"\"Get a health report :rtype: Health \"\"\"", "user=None): \"\"\"Get a health report for the database :rtype: Health", "except OperationalError: response = ({\"status\": \"Error\", \"message\": \"Unable to connect", "else: 
Result.query.first() response = ({\"status\": \"OK\", \"message\": \"Service is running\"},", "get_database_health(token_info=None, user=None): \"\"\"Get a health report for the database :rtype:", "running\"}, 200) except OperationalError: response = ({\"status\": \"Error\", \"message\": \"Unable", "configuration\"}, 500) else: Result.query.first() response = ({\"status\": \"OK\", \"message\": \"Service", "database :rtype: Health \"\"\" response = ({\"status\": \"Pending\", \"message\": \"Fetching", "from flask import current_app from sqlalchemy.exc import InterfaceError from sqlalchemy.exc", "HealthInfo \"\"\" return { \"frontend\": current_app.config.get(\"FRONTEND_URL\", \"http://localhost:3000\"), \"backend\": current_app.config.get(\"BACKEND_URL\", \"http://localhost:8080\"),", "= ({\"status\": \"Error\", \"message\": \"Incorrect connection configuration\"}, 500) except Exception", "response = ({\"status\": \"Error\", \"message\": \"Unable to connect to the", "except InterfaceError: response = ({\"status\": \"Error\", \"message\": \"Incorrect connection configuration\"},", "\"Incorrect connection configuration\"}, 500) except Exception as e: response =", "500) except Exception as e: response = ({\"status\": \"Error\", \"message\":", "return { \"frontend\": current_app.config.get(\"FRONTEND_URL\", \"http://localhost:3000\"), \"backend\": current_app.config.get(\"BACKEND_URL\", \"http://localhost:8080\"), \"api_ui\": current_app.config.get(\"BACKEND_URL\",", "\"Pending\", \"message\": \"Fetching service status\"}, 200) # Try to connect", "# Try to connect to the database, and handle various", "IS_CONNECTED = True except ImportError: IS_CONNECTED = False def get_health(token_info=None,", "\"\"\"Get a health report :rtype: Health \"\"\" return {\"status\": \"OK\",", "= ({\"status\": \"Pending\", \"message\": \"Fetching service status\"}, 200) # Try", "database\"}, 500) except InterfaceError: response = ({\"status\": \"Error\", \"message\": \"Incorrect", "True except ImportError: IS_CONNECTED = False def get_health(token_info=None, user=None): \"\"\"Get", "= True except ImportError: IS_CONNECTED = False def get_health(token_info=None, user=None):", "if not IS_CONNECTED: response = ({\"status\": \"Error\", \"message\": \"Incomplete database", "\"Error\", \"message\": \"Incorrect connection configuration\"}, 500) except Exception as e:", "= ({\"status\": \"OK\", \"message\": \"Service is running\"}, 200) except OperationalError:", "({\"status\": \"OK\", \"message\": \"Service is running\"}, 200) except OperationalError: response", "\"Unable to connect to the database\"}, 500) except InterfaceError: response", "Result IS_CONNECTED = True except ImportError: IS_CONNECTED = False def", "False def get_health(token_info=None, user=None): \"\"\"Get a health report :rtype: Health", "500) else: Result.query.first() response = ({\"status\": \"OK\", \"message\": \"Service is", "\"\"\" return {\"status\": \"OK\", \"message\": \"Service is running\"} def get_database_health(token_info=None,", "to connect to the database, and handle various responses try:", "sqlalchemy.exc import InterfaceError from sqlalchemy.exc import OperationalError try: from ibutsu_server.db.model", "{\"status\": \"OK\", \"message\": \"Service is running\"} def get_database_health(token_info=None, user=None): \"\"\"Get", "\"message\": \"Incorrect connection configuration\"}, 500) except Exception as e: response", "ImportError: IS_CONNECTED = False def get_health(token_info=None, user=None): \"\"\"Get a health", "= False def 
get_health(token_info=None, user=None): \"\"\"Get a health report :rtype:", "response = ({\"status\": \"Error\", \"message\": str(e)}, 500) return response def", "as e: response = ({\"status\": \"Error\", \"message\": str(e)}, 500) return", "about this server :rtype: HealthInfo \"\"\" return { \"frontend\": current_app.config.get(\"FRONTEND_URL\",", "{ \"frontend\": current_app.config.get(\"FRONTEND_URL\", \"http://localhost:3000\"), \"backend\": current_app.config.get(\"BACKEND_URL\", \"http://localhost:8080\"), \"api_ui\": current_app.config.get(\"BACKEND_URL\", \"http://localhost:8080\")", "database, and handle various responses try: if not IS_CONNECTED: response", "return response def get_health_info(token_info=None, user=None): \"\"\"Get the information about this", "\"message\": \"Service is running\"}, 200) except OperationalError: response = ({\"status\":", "from sqlalchemy.exc import OperationalError try: from ibutsu_server.db.model import Result IS_CONNECTED", "500) except InterfaceError: response = ({\"status\": \"Error\", \"message\": \"Incorrect connection", "Health \"\"\" return {\"status\": \"OK\", \"message\": \"Service is running\"} def", "response = ({\"status\": \"Error\", \"message\": \"Incomplete database configuration\"}, 500) else:", "report for the database :rtype: Health \"\"\" response = ({\"status\":", "import Result IS_CONNECTED = True except ImportError: IS_CONNECTED = False", "report :rtype: Health \"\"\" return {\"status\": \"OK\", \"message\": \"Service is", "= ({\"status\": \"Error\", \"message\": str(e)}, 500) return response def get_health_info(token_info=None,", "\"Fetching service status\"}, 200) # Try to connect to the", "try: from ibutsu_server.db.model import Result IS_CONNECTED = True except ImportError:", "the database, and handle various responses try: if not IS_CONNECTED:", "current_app.config.get(\"FRONTEND_URL\", \"http://localhost:3000\"), \"backend\": current_app.config.get(\"BACKEND_URL\", \"http://localhost:8080\"), \"api_ui\": current_app.config.get(\"BACKEND_URL\", \"http://localhost:8080\") + \"/api/ui/\",", "OperationalError try: from ibutsu_server.db.model import Result IS_CONNECTED = True except", "<filename>backend/ibutsu_server/controllers/health_controller.py from flask import current_app from sqlalchemy.exc import InterfaceError from", "IS_CONNECTED: response = ({\"status\": \"Error\", \"message\": \"Incomplete database configuration\"}, 500)", "current_app from sqlalchemy.exc import InterfaceError from sqlalchemy.exc import OperationalError try:", "200) except OperationalError: response = ({\"status\": \"Error\", \"message\": \"Unable to", "OperationalError: response = ({\"status\": \"Error\", \"message\": \"Unable to connect to", "\"message\": \"Fetching service status\"}, 200) # Try to connect to", ":rtype: HealthInfo \"\"\" return { \"frontend\": current_app.config.get(\"FRONTEND_URL\", \"http://localhost:3000\"), \"backend\": current_app.config.get(\"BACKEND_URL\",", "get_health(token_info=None, user=None): \"\"\"Get a health report :rtype: Health \"\"\" return", "\"Incomplete database configuration\"}, 500) else: Result.query.first() response = ({\"status\": \"OK\",", "import OperationalError try: from ibutsu_server.db.model import Result IS_CONNECTED = True", "status\"}, 200) # Try to connect to the database, and", "e: response = ({\"status\": \"Error\", \"message\": str(e)}, 500) return response", "to the database, and handle various responses try: if not", "({\"status\": \"Error\", \"message\": str(e)}, 500) return 
response def get_health_info(token_info=None, user=None):", ":rtype: Health \"\"\" response = ({\"status\": \"Pending\", \"message\": \"Fetching service", "not IS_CONNECTED: response = ({\"status\": \"Error\", \"message\": \"Incomplete database configuration\"},", "response def get_health_info(token_info=None, user=None): \"\"\"Get the information about this server", "server :rtype: HealthInfo \"\"\" return { \"frontend\": current_app.config.get(\"FRONTEND_URL\", \"http://localhost:3000\"), \"backend\":", "from ibutsu_server.db.model import Result IS_CONNECTED = True except ImportError: IS_CONNECTED", "flask import current_app from sqlalchemy.exc import InterfaceError from sqlalchemy.exc import", "({\"status\": \"Error\", \"message\": \"Incorrect connection configuration\"}, 500) except Exception as", "connection configuration\"}, 500) except Exception as e: response = ({\"status\":", "Result.query.first() response = ({\"status\": \"OK\", \"message\": \"Service is running\"}, 200)", "({\"status\": \"Error\", \"message\": \"Unable to connect to the database\"}, 500)", "to connect to the database\"}, 500) except InterfaceError: response =", "a health report :rtype: Health \"\"\" return {\"status\": \"OK\", \"message\":", "except Exception as e: response = ({\"status\": \"Error\", \"message\": str(e)}," ]
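# Usage sketch (hypothetical, not part of the original controller): connexion
# normally routes requests to these handlers, but they can also be exercised
# directly inside a Flask application context; FRONTEND_URL/BACKEND_URL come
# from the app config.
from flask import Flask

_app = Flask(__name__)
_app.config["FRONTEND_URL"] = "http://localhost:3000"

with _app.app_context():
    assert get_health() == {"status": "OK", "message": "Service is running"}
    body, status = get_database_health()
    assert status in (200, 500)  # 500 when no database is reachable
    assert get_health_info()["api_ui"].endswith("/api/ui/")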
[ "metric='euclidean', init='random', return_style='once', num_snapshots=5, verbose=0, random_seed=None, use_interactive=False, viz_timeout=10000, viz_server=\"tcp://localhost:5556\", dump_points=False,", "X into an embedded space and return that transformed output.", "N.require(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED']) # Handle dumping and viz strings", "ctypes.c_float(self.early_exaggeration), # Magnitude Factor ctypes.c_int(self.num_neighbors), # Num Neighbors ctypes.c_int(self.n_iter), #", "dumping and viz strings self.dump_file_ = N.require(ord_string(self.dump_file), N.uint8, ['CONTIGUOUS', 'ALIGNED'])", "L.J.P. t-Distributed Stochastic Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html \"\"\" import numpy as", "# Magnitude Factor ctypes.c_int, # Num Neighbors ctypes.c_int, # Iterations", "import numpy as N import ctypes import os import pkg_resources", "current barnes-hut implementation does not support projection into dimensions other", "Initialize the variables self.n_components = int(n_components) if self.n_components != 2:", "= [ N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS, WRITEABLE'), # result N.ctypeslib.ndpointer(N.float32,", "['CONTIGUOUS', 'ALIGNED']) self._lib.pymodule_bh_tsne( self.embedding, # result self.points, # points self.points.ctypes.shape,", "Visualizing High-Dimensional Data Using t-SNE. Journal of Machine Learning Research", "str(viz_server) self.viz_timeout = int(viz_timeout) # Return style if return_style not", "ctypes.c_float(self.theta), # Theta ctypes.c_float(self.epssq), # epssq ctypes.c_float(self.min_grad_norm), # Minimum gradient", "Iterations no progress ctypes.c_int, # Force Magnify iterations ctypes.c_float, #", "ctypes library # self._gpufaiss_lib = N.ctypeslib.load_library('libgpufaiss', self._path) # Load the", "# Initialization Data ctypes.c_bool, # Dump points N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'),", "Build the hooks for the BH T-SNE library self._path =", "ctypes.c_float(self.min_grad_norm), # Minimum gradient norm ctypes.c_int(self.initialization_type), # Initialization types self.init_data,", "result N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS'), # points ctypes.POINTER(N.ctypeslib.c_intp), # dims", "Minimum gradient norm ctypes.c_int(self.initialization_type), # Initialization types self.init_data, # Initialization", "# Iterations ctypes.c_int(self.n_iter_without_progress), # Iterations no progress ctypes.c_int(self.force_magnify_iters), # Force", "'ALIGNED']) self._lib.pymodule_bh_tsne( self.embedding, # result self.points, # points self.points.ctypes.shape, #", "Data Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.", "N.zeros(shape=(X.shape[0],self.n_components)) self.embedding = N.require(self.embedding , N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE']) #", "# Return style if return_style not in ['once','snapshots']: raise ValueError('Invalid", "Viz Server ctypes.c_int, # Viz timeout ctypes.c_int, # Verbosity ctypes.c_int,", "currently supported. 
Please use metric=\\'euclidean\\' for now.') else: self.metric =", "n_dimensions) Keyword Arguments: y {None} -- Ignored (default: {None}) \"\"\"", "b = bytearray() arr = b.extend(map(ord, s)) return N.array([x for", "# pre-exaggeration momentum ctypes.c_float(self.post_momentum), # post-exaggeration momentum ctypes.c_float(self.theta), # Theta", "Dump points N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Dump File ctypes.c_int, #", "= N.ctypeslib.load_library('libtsnecuda', self._path) # Load the ctypes library # Hook", "# Load the ctypes library # Hook the BH T-SNE", "self.dump_file = str(dump_file) self.dump_points = bool(dump_points) self.dump_interval = int(dump_interval) #", "location # self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path) # Load the ctypes", "ctypes.c_int(self.num_neighbors), # Num Neighbors ctypes.c_int(self.n_iter), # Iterations ctypes.c_int(self.n_iter_without_progress), # Iterations", "Viz timeout ctypes.c_int, # Verbosity ctypes.c_int, # Print interval ctypes.c_int,", "pre_momentum=0.5, post_momentum=0.8, theta=0.5, epssq=0.0025, n_iter=1000, n_iter_without_progress=1000, min_grad_norm=1e-7, perplexity_epsilon=1e-3, metric='euclidean', init='random',", "nearest neighbors Refs: References [1] <NAME>, L.J.P.; Hinton, G.E. Visualizing", "method for barnes hut T-SNE class. \"\"\" # Initialize the", "flags='ALIGNED, F_CONTIGUOUS'), # Initialization Data ctypes.c_bool, # Dump points N.ctypeslib.ndpointer(N.uint8,", "# Learning Rate ctypes.c_float(self.early_exaggeration), # Magnitude Factor ctypes.c_int(self.num_neighbors), # Num", "flags='ALIGNED, CONTIGUOUS'), # Dump File ctypes.c_int, # Dump interval ctypes.c_bool,", "Refs: References [1] <NAME>, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data", "the variables self.n_components = int(n_components) if self.n_components != 2: raise", "not in ['random']: raise ValueError('Non-Random initialization is not currently supported.", "Ignored (default: {None}) \"\"\" # Setup points/embedding requirements self.points =", "Neighbors ctypes.c_int, # Iterations ctypes.c_int, # Iterations no progress ctypes.c_int,", "self.learning_rate = float(learning_rate) self.n_iter = int(n_iter) self.n_iter_without_progress = int(n_iter_without_progress) self.min_grad_norm", "y {None} -- Ignored (default: {None}) \"\"\" # Setup points/embedding", "dims ctypes.c_float(self.perplexity), # Perplexity ctypes.c_float(self.learning_rate), # Learning Rate ctypes.c_float(self.early_exaggeration), #", "N.ctypeslib.load_library('libgpufaiss', self._path) # Load the ctypes library self._lib = N.ctypeslib.load_library('libtsnecuda',", "CONTIGUOUS'), # Dump File ctypes.c_int, # Dump interval ctypes.c_bool, #", "self.points, # points self.points.ctypes.shape, # dims ctypes.c_float(self.perplexity), # Perplexity ctypes.c_float(self.learning_rate),", "Viz timeout ctypes.c_int(self.verbose), # Verbosity ctypes.c_int(self.print_interval), # Print interval ctypes.c_int(self.device),", "ctypes.c_float(self.learning_rate), # Learning Rate ctypes.c_float(self.early_exaggeration), # Magnitude Factor ctypes.c_int(self.num_neighbors), #", "# Iterations no progress ctypes.c_int, # Force Magnify iterations ctypes.c_float,", "Setup points/embedding requirements self.points = N.require(X, N.float32, ['CONTIGUOUS', 'ALIGNED']) self.embedding", "['random']: raise ValueError('Non-Random initialization is not currently supported. 
Please use", "Theta ctypes.c_float(self.epssq), # epssq ctypes.c_float(self.min_grad_norm), # Minimum gradient norm ctypes.c_int(self.initialization_type),", "Dump File ctypes.c_int(self.dump_interval), # Dump interval ctypes.c_bool(self.use_interactive), # Use interactive", "BH T-SNE function self._lib.pymodule_bh_tsne.restype = None self._lib.pymodule_bh_tsne.argtypes = [ N.ctypeslib.ndpointer(N.float32,", "types self.init_data, # Initialization Data ctypes.c_bool(self.dump_points), # Dump points self.dump_file_,", "# Theta ctypes.c_float, # epssq ctypes.c_float, # Minimum gradient norm", "[2] <NAME>, L.J.P. t-Distributed Stochastic Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html \"\"\" import", "Research 9:2579-2605, 2008. [2] <NAME>, L.J.P. t-Distributed Stochastic Neighbor Embedding", "os import pkg_resources def ord_string(s): b = bytearray() arr =", "style...') elif return_style == 'once': self.return_style = 0 elif return_style", "viz strings self.dump_file_ = N.require(ord_string(self.dump_file), N.uint8, ['CONTIGUOUS', 'ALIGNED']) self.viz_server_ =", "not support projection into dimensions other than 2 for now.')", "(default: {None}) \"\"\" # Setup points/embedding requirements self.points = N.require(X,", "N.ctypeslib.load_library('libfaiss', self._path) # Load the ctypes library # self._gpufaiss_lib =", "points N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Dump File ctypes.c_int, # Dump", "learning_rate=200.0, num_neighbors=1023, force_magnify_iters=250, pre_momentum=0.5, post_momentum=0.8, theta=0.5, epssq=0.0025, n_iter=1000, n_iter_without_progress=1000, min_grad_norm=1e-7,", "# Dump interval ctypes.c_bool(self.use_interactive), # Use interactive self.viz_server_, # Viz", "str(dump_file) self.dump_points = bool(dump_points) self.dump_interval = int(dump_interval) # Viz self.use_interactive", "use_interactive=False, viz_timeout=10000, viz_server=\"tcp://localhost:5556\", dump_points=False, dump_file=\"dump.txt\", dump_interval=1, print_interval=10, device=0, ): \"\"\"Initialization", "\"\"\" # Setup points/embedding requirements self.points = N.require(X, N.float32, ['CONTIGUOUS',", "self.early_exaggeration = float(early_exaggeration) self.learning_rate = float(learning_rate) self.n_iter = int(n_iter) self.n_iter_without_progress", "BH T-SNE library self._path = pkg_resources.resource_filename('tsnecuda','') # Load from current", "Iterations no progress ctypes.c_int(self.force_magnify_iters), # Force Magnify iterations ctypes.c_float(self.perplexity_epsilon), #", "CONTIGUOUS'), # Viz Server ctypes.c_int, # Viz timeout ctypes.c_int, #", "T-SNE function self._lib.pymodule_bh_tsne.restype = None self._lib.pymodule_bh_tsne.argtypes = [ N.ctypeslib.ndpointer(N.float32, ndim=2,", "ctypes.c_float, # post-exaggeration momentum ctypes.c_float, # Theta ctypes.c_float, # epssq", "Data ctypes.c_bool, # Dump points N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Dump", "\"\"\"Bindings for the Barnes Hut TSNE algorithm with fast nearest", "Initialization types self.init_data, # Initialization Data ctypes.c_bool(self.dump_points), # Dump points", "N.require(ord_string(self.dump_file), N.uint8, ['CONTIGUOUS', 'ALIGNED']) self.viz_server_ = N.require(ord_string(self.viz_server), N.uint8, ['CONTIGUOUS', 'ALIGNED'])", "library # Hook the BH T-SNE function self._lib.pymodule_bh_tsne.restype = None", "= 0 elif return_style == 'snapshots': self.return_style = 1 self.num_snapshots", "self.points = N.require(X, N.float32, ['CONTIGUOUS', 'ALIGNED']) 
self.embedding = N.zeros(shape=(X.shape[0],self.n_components)) self.embedding", "self._lib.pymodule_bh_tsne( self.embedding, # result self.points, # points self.points.ctypes.shape, # dims", "random_seed=None, use_interactive=False, viz_timeout=10000, viz_server=\"tcp://localhost:5556\", dump_points=False, dump_file=\"dump.txt\", dump_interval=1, print_interval=10, device=0, ):", "timeout ctypes.c_int(self.verbose), # Verbosity ctypes.c_int(self.print_interval), # Print interval ctypes.c_int(self.device), #", "into an embedded space and return that transformed output. Arguments:", "self.n_components != 2: raise ValueError('The current barnes-hut implementation does not", "= float(learning_rate) self.n_iter = int(n_iter) self.n_iter_without_progress = int(n_iter_without_progress) self.min_grad_norm =", "None self._lib.pymodule_bh_tsne.argtypes = [ N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS, WRITEABLE'), #", "ctypes.c_int(self.initialization_type), # Initialization types self.init_data, # Initialization Data ctypes.c_bool(self.dump_points), #", "return style...') elif return_style == 'once': self.return_style = 0 elif", "Learning Rate ctypes.c_float, # Magnitude Factor ctypes.c_int, # Num Neighbors", "an embedded space and return that transformed output. Arguments: X", "Print interval ctypes.c_int(self.device), # GPU Device ctypes.c_int(self.return_style), # Return style", "= 1 self.init_data = N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED']) else: self.initialization_type = 3 self.init_data", "N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED']) else: self.initialization_type = 3 self.init_data = N.require(y, N.float32, ['F_CONTIGUOUS',", "-- Ignored (default: {None}) \"\"\" # Setup points/embedding requirements self.points", "are not currently supported. Please use metric=\\'euclidean\\' for now.') else:", "not in ['euclidean']: raise ValueError('Non-Euclidean metrics are not currently supported.", "File ctypes.c_int, # Dump interval ctypes.c_bool, # Use interactive N.ctypeslib.ndpointer(N.uint8,", "= 3 self.init_data = N.require(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED']) # Handle", "post_momentum=0.8, theta=0.5, epssq=0.0025, n_iter=1000, n_iter_without_progress=1000, min_grad_norm=1e-7, perplexity_epsilon=1e-3, metric='euclidean', init='random', return_style='once',", "ctypes.c_int, # Print interval ctypes.c_int, # GPU Device ctypes.c_int, #", "Input array, shape: (n_points, n_dimensions) Keyword Arguments: y {None} --", "<NAME>, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data Using t-SNE. 
Journal", "= float(min_grad_norm) if metric not in ['euclidean']: raise ValueError('Non-Euclidean metrics", "N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS'), # points ctypes.POINTER(N.ctypeslib.c_intp), # dims ctypes.c_float,", "self.dump_file_ = N.require(ord_string(self.dump_file), N.uint8, ['CONTIGUOUS', 'ALIGNED']) self.viz_server_ = N.require(ord_string(self.viz_server), N.uint8,", "force_magnify_iters=250, pre_momentum=0.5, post_momentum=0.8, theta=0.5, epssq=0.0025, n_iter=1000, n_iter_without_progress=1000, min_grad_norm=1e-7, perplexity_epsilon=1e-3, metric='euclidean',", "self.post_momentum = float(post_momentum) self.theta = float(theta) self.epssq =float(epssq) self.device =", "self.embedding = N.zeros(shape=(X.shape[0],self.n_components)) self.embedding = N.require(self.embedding , N.float32, ['F_CONTIGUOUS', 'ALIGNED',", "self._path) # Load the ctypes library # self._gpufaiss_lib = N.ctypeslib.load_library('libgpufaiss',", "# Dump points self.dump_file_, # Dump File ctypes.c_int(self.dump_interval), # Dump", "# Build the hooks for the BH T-SNE library self._path", "Theta ctypes.c_float, # epssq ctypes.c_float, # Minimum gradient norm ctypes.c_int,", "ctypes.c_int ] # Number of snapshots def fit_transform(self, X, y=None):", "momentum ctypes.c_float, # Theta ctypes.c_float, # epssq ctypes.c_float, # Minimum", "['CONTIGUOUS', 'ALIGNED']) self.viz_server_ = N.require(ord_string(self.viz_server), N.uint8, ['CONTIGUOUS', 'ALIGNED']) self._lib.pymodule_bh_tsne( self.embedding,", "interval ctypes.c_bool(self.use_interactive), # Use interactive self.viz_server_, # Viz Server ctypes.c_int(self.viz_timeout),", "supported. Please use init=\\'random\\' for now.') else: self.init = init", "WRITEABLE'), # result N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS'), # points ctypes.POINTER(N.ctypeslib.c_intp),", "Perplexity search epsilon ctypes.c_float, # pre-exaggeration momentum ctypes.c_float, # post-exaggeration", "ctypes.c_float, # Perplexity ctypes.c_float, # Learning Rate ctypes.c_float, # Magnitude", "= N.require(X, N.float32, ['CONTIGUOUS', 'ALIGNED']) self.embedding = N.zeros(shape=(X.shape[0],self.n_components)) self.embedding =", "is not currently supported. 
Please use init=\\'random\\' for now.') else:", "init=\\'random\\' for now.') else: self.init = init self.verbose = int(verbose)", "= str(dump_file) self.dump_points = bool(dump_points) self.dump_interval = int(dump_interval) # Viz", "ctypes.c_int(self.n_iter_without_progress), # Iterations no progress ctypes.c_int(self.force_magnify_iters), # Force Magnify iterations", "# Initialization types N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS'), # Initialization Data", "iterations ctypes.c_float, # Perplexity search epsilon ctypes.c_float, # pre-exaggeration momentum", "self.n_iter_without_progress = int(n_iter_without_progress) self.min_grad_norm = float(min_grad_norm) if metric not in", "of snapshots def fit_transform(self, X, y=None): \"\"\"Fit X into an", "b] + [0]).astype(N.uint8) class TSNE(object): def __init__(self, n_components=2, perplexity=50.0, early_exaggeration=2.0,", "float(perplexity) self.early_exaggeration = float(early_exaggeration) self.learning_rate = float(learning_rate) self.n_iter = int(n_iter)", "num_neighbors=1023, force_magnify_iters=250, pre_momentum=0.5, post_momentum=0.8, theta=0.5, epssq=0.0025, n_iter=1000, n_iter_without_progress=1000, min_grad_norm=1e-7, perplexity_epsilon=1e-3,", "= pkg_resources.resource_filename('tsnecuda','') # Load from current location # self._faiss_lib =", "ctypes.c_int, # Force Magnify iterations ctypes.c_float, # Perplexity search epsilon", "Initialization Data ctypes.c_bool(self.dump_points), # Dump points self.dump_file_, # Dump File", "[0]).astype(N.uint8) class TSNE(object): def __init__(self, n_components=2, perplexity=50.0, early_exaggeration=2.0, learning_rate=200.0, num_neighbors=1023,", "= int(force_magnify_iters) self.perplexity_epsilon = float(perplexity_epsilon) self.pre_momentum = float(pre_momentum) self.post_momentum =", "Point dumpoing self.dump_file = str(dump_file) self.dump_points = bool(dump_points) self.dump_interval =", "Initialization Data ctypes.c_bool, # Dump points N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), #", "!= 2: raise ValueError('The current barnes-hut implementation does not support", "snapshots def fit_transform(self, X, y=None): \"\"\"Fit X into an embedded", "self.init = init self.verbose = int(verbose) # Initialize non-sklearn variables", "N.array([x for x in b] + [0]).astype(N.uint8) class TSNE(object): def", "implementation does not support projection into dimensions other than 2", "N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Dump File ctypes.c_int, # Dump interval", "self.print_interval = int(print_interval) # Point dumpoing self.dump_file = str(dump_file) self.dump_points", "# Perplexity search epsilon ctypes.c_float(self.pre_momentum), # pre-exaggeration momentum ctypes.c_float(self.post_momentum), #", "Data ctypes.c_bool(self.dump_points), # Dump points self.dump_file_, # Dump File ctypes.c_int(self.dump_interval),", "# dims ctypes.c_float(self.perplexity), # Perplexity ctypes.c_float(self.learning_rate), # Learning Rate ctypes.c_float(self.early_exaggeration),", "epsilon ctypes.c_float, # pre-exaggeration momentum ctypes.c_float, # post-exaggeration momentum ctypes.c_float,", "Force Magnify iterations ctypes.c_float, # Perplexity search epsilon ctypes.c_float, #", "\"\"\"Fit X into an embedded space and return that transformed", "initialization is not currently supported. 
Please use init=\\'random\\' for now.')", "metric=\\'euclidean\\' for now.') else: self.metric = metric if init not", "self.init_data = N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED']) else: self.initialization_type = 3 self.init_data = N.require(y,", "Magnitude Factor ctypes.c_int, # Num Neighbors ctypes.c_int, # Iterations ctypes.c_int,", "] # Number of snapshots def fit_transform(self, X, y=None): \"\"\"Fit", "# Dump interval ctypes.c_bool, # Use interactive N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'),", "else: self.initialization_type = 3 self.init_data = N.require(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED'])", "# GPU Device ctypes.c_int(self.return_style), # Return style ctypes.c_int(self.num_snapshots) ) #", "{array} -- Input array, shape: (n_points, n_dimensions) Keyword Arguments: y", "no progress ctypes.c_int(self.force_magnify_iters), # Force Magnify iterations ctypes.c_float(self.perplexity_epsilon), # Perplexity", "# Viz timeout ctypes.c_int(self.verbose), # Verbosity ctypes.c_int(self.print_interval), # Print interval", "# Use interactive N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Viz Server ctypes.c_int,", "transformed output. Arguments: X {array} -- Input array, shape: (n_points,", "= N.require(ord_string(self.viz_server), N.uint8, ['CONTIGUOUS', 'ALIGNED']) self._lib.pymodule_bh_tsne( self.embedding, # result self.points,", "Viz Server ctypes.c_int(self.viz_timeout), # Viz timeout ctypes.c_int(self.verbose), # Verbosity ctypes.c_int(self.print_interval),", "# Iterations no progress ctypes.c_int(self.force_magnify_iters), # Force Magnify iterations ctypes.c_float(self.perplexity_epsilon),", "interactive N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Viz Server ctypes.c_int, # Viz", "# self._gpufaiss_lib = N.ctypeslib.load_library('libgpufaiss', self._path) # Load the ctypes library", "int(verbose) # Initialize non-sklearn variables self.num_neighbors = int(num_neighbors) self.force_magnify_iters =", "timeout ctypes.c_int, # Verbosity ctypes.c_int, # Print interval ctypes.c_int, #", "variables self.num_neighbors = int(num_neighbors) self.force_magnify_iters = int(force_magnify_iters) self.perplexity_epsilon = float(perplexity_epsilon)", "the ctypes library self._lib = N.ctypeslib.load_library('libtsnecuda', self._path) # Load the", "with fast nearest neighbors Refs: References [1] <NAME>, L.J.P.; Hinton,", "'ALIGNED']) self.viz_server_ = N.require(ord_string(self.viz_server), N.uint8, ['CONTIGUOUS', 'ALIGNED']) self._lib.pymodule_bh_tsne( self.embedding, #", "3 self.init_data = N.require(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED']) # Handle dumping", "return_style not in ['once','snapshots']: raise ValueError('Invalid return style...') elif return_style", "self.dump_interval = int(dump_interval) # Viz self.use_interactive = bool(use_interactive) self.viz_server =", "barnes-hut implementation does not support projection into dimensions other than", "ndim=2, flags='ALIGNED, F_CONTIGUOUS, WRITEABLE'), # result N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS'),", "Rate ctypes.c_float, # Magnitude Factor ctypes.c_int, # Num Neighbors ctypes.c_int,", "viz_timeout=10000, viz_server=\"tcp://localhost:5556\", dump_points=False, dump_file=\"dump.txt\", dump_interval=1, print_interval=10, device=0, ): \"\"\"Initialization method", "supported. 
Please use metric=\\'euclidean\\' for now.') else: self.metric = metric", "epssq=0.0025, n_iter=1000, n_iter_without_progress=1000, min_grad_norm=1e-7, perplexity_epsilon=1e-3, metric='euclidean', init='random', return_style='once', num_snapshots=5, verbose=0,", "Iterations ctypes.c_int(self.n_iter_without_progress), # Iterations no progress ctypes.c_int(self.force_magnify_iters), # Force Magnify", "Magnitude Factor ctypes.c_int(self.num_neighbors), # Num Neighbors ctypes.c_int(self.n_iter), # Iterations ctypes.c_int(self.n_iter_without_progress),", "Load the ctypes library # Hook the BH T-SNE function", "= int(device) self.print_interval = int(print_interval) # Point dumpoing self.dump_file =", "ctypes.c_int, # Viz timeout ctypes.c_int, # Verbosity ctypes.c_int, # Print", "F_CONTIGUOUS, WRITEABLE'), # result N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS'), # points", "flags='ALIGNED, F_CONTIGUOUS, WRITEABLE'), # result N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS'), #", "raise ValueError('Non-Random initialization is not currently supported. Please use init=\\'random\\'", "int(num_neighbors) self.force_magnify_iters = int(force_magnify_iters) self.perplexity_epsilon = float(perplexity_epsilon) self.pre_momentum = float(pre_momentum)", "2008. [2] <NAME>, L.J.P. t-Distributed Stochastic Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html \"\"\"", "pre-exaggeration momentum ctypes.c_float(self.post_momentum), # post-exaggeration momentum ctypes.c_float(self.theta), # Theta ctypes.c_float(self.epssq),", "no progress ctypes.c_int, # Force Magnify iterations ctypes.c_float, # Perplexity", "-- Input array, shape: (n_points, n_dimensions) Keyword Arguments: y {None}", "= N.require(self.embedding , N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE']) # Handle Initialization", "flags='ALIGNED, CONTIGUOUS'), # points ctypes.POINTER(N.ctypeslib.c_intp), # dims ctypes.c_float, # Perplexity", "ctypes.c_float(self.perplexity_epsilon), # Perplexity search epsilon ctypes.c_float(self.pre_momentum), # pre-exaggeration momentum ctypes.c_float(self.post_momentum),", "for x in b] + [0]).astype(N.uint8) class TSNE(object): def __init__(self,", "N.uint8, ['CONTIGUOUS', 'ALIGNED']) self._lib.pymodule_bh_tsne( self.embedding, # result self.points, # points", "int(force_magnify_iters) self.perplexity_epsilon = float(perplexity_epsilon) self.pre_momentum = float(pre_momentum) self.post_momentum = float(post_momentum)", "== 'once': self.return_style = 0 elif return_style == 'snapshots': self.return_style", "# Perplexity search epsilon ctypes.c_float, # pre-exaggeration momentum ctypes.c_float, #", "# Perplexity ctypes.c_float, # Learning Rate ctypes.c_float, # Magnitude Factor", ", N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE']) # Handle Initialization if y", "# Initialize the variables self.n_components = int(n_components) if self.n_components !=", "for the BH T-SNE library self._path = pkg_resources.resource_filename('tsnecuda','') # Load", "Magnify iterations ctypes.c_float, # Perplexity search epsilon ctypes.c_float, # pre-exaggeration", "bytearray() arr = b.extend(map(ord, s)) return N.array([x for x in", "current location # self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path) # Load the", "raise ValueError('Non-Euclidean metrics are not currently supported. 
Please use metric=\\'euclidean\\'", "other than 2 for now.') self.perplexity = float(perplexity) self.early_exaggeration =", "# Dump points N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Dump File ctypes.c_int,", "self._lib.pymodule_bh_tsne.restype = None self._lib.pymodule_bh_tsne.argtypes = [ N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS,", "norm ctypes.c_int(self.initialization_type), # Initialization types self.init_data, # Initialization Data ctypes.c_bool(self.dump_points),", "self.dump_points = bool(dump_points) self.dump_interval = int(dump_interval) # Viz self.use_interactive =", "style ctypes.c_int ] # Number of snapshots def fit_transform(self, X,", "Initialize non-sklearn variables self.num_neighbors = int(num_neighbors) self.force_magnify_iters = int(force_magnify_iters) self.perplexity_epsilon", "perplexity_epsilon=1e-3, metric='euclidean', init='random', return_style='once', num_snapshots=5, verbose=0, random_seed=None, use_interactive=False, viz_timeout=10000, viz_server=\"tcp://localhost:5556\",", "self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path) # Load the ctypes library #", "ctypes library self._lib = N.ctypeslib.load_library('libtsnecuda', self._path) # Load the ctypes", "# Verbosity ctypes.c_int(self.print_interval), # Print interval ctypes.c_int(self.device), # GPU Device", "currently supported. Please use init=\\'random\\' for now.') else: self.init =", "the BH T-SNE function self._lib.pymodule_bh_tsne.restype = None self._lib.pymodule_bh_tsne.argtypes = [", "self.viz_timeout = int(viz_timeout) # Return style if return_style not in", "does not support projection into dimensions other than 2 for", "N.require(X, N.float32, ['CONTIGUOUS', 'ALIGNED']) self.embedding = N.zeros(shape=(X.shape[0],self.n_components)) self.embedding = N.require(self.embedding", "Rate ctypes.c_float(self.early_exaggeration), # Magnitude Factor ctypes.c_int(self.num_neighbors), # Num Neighbors ctypes.c_int(self.n_iter),", "# self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path) # Load the ctypes library", "__init__(self, n_components=2, perplexity=50.0, early_exaggeration=2.0, learning_rate=200.0, num_neighbors=1023, force_magnify_iters=250, pre_momentum=0.5, post_momentum=0.8, theta=0.5,", "for now.') self.perplexity = float(perplexity) self.early_exaggeration = float(early_exaggeration) self.learning_rate =", "if y is None: self.initialization_type = 1 self.init_data = N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED'])", "= int(num_snapshots) # Build the hooks for the BH T-SNE", "= int(num_neighbors) self.force_magnify_iters = int(force_magnify_iters) self.perplexity_epsilon = float(perplexity_epsilon) self.pre_momentum =", "import pkg_resources def ord_string(s): b = bytearray() arr = b.extend(map(ord,", "= str(viz_server) self.viz_timeout = int(viz_timeout) # Return style if return_style", "ctypes.c_int(self.viz_timeout), # Viz timeout ctypes.c_int(self.verbose), # Verbosity ctypes.c_int(self.print_interval), # Print", "def ord_string(s): b = bytearray() arr = b.extend(map(ord, s)) return", "N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Viz Server ctypes.c_int, # Viz timeout", "# Magnitude Factor ctypes.c_int(self.num_neighbors), # Num Neighbors ctypes.c_int(self.n_iter), # Iterations", "ctypes.c_int, # Verbosity ctypes.c_int, # Print interval ctypes.c_int, # GPU", "ctypes.c_float, # epssq ctypes.c_float, # Minimum gradient norm ctypes.c_int, #", "['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE']) # 
Handle Initialization if y is None:", "interactive self.viz_server_, # Viz Server ctypes.c_int(self.viz_timeout), # Viz timeout ctypes.c_int(self.verbose),", "'WRITEABLE']) # Handle Initialization if y is None: self.initialization_type =", "# Initialization types self.init_data, # Initialization Data ctypes.c_bool(self.dump_points), # Dump", "['once','snapshots']: raise ValueError('Invalid return style...') elif return_style == 'once': self.return_style", "G.E. Visualizing High-Dimensional Data Using t-SNE. Journal of Machine Learning", "= int(verbose) # Initialize non-sklearn variables self.num_neighbors = int(num_neighbors) self.force_magnify_iters", "File ctypes.c_int(self.dump_interval), # Dump interval ctypes.c_bool(self.use_interactive), # Use interactive self.viz_server_,", "self.viz_server_, # Viz Server ctypes.c_int(self.viz_timeout), # Viz timeout ctypes.c_int(self.verbose), #", "n_components=2, perplexity=50.0, early_exaggeration=2.0, learning_rate=200.0, num_neighbors=1023, force_magnify_iters=250, pre_momentum=0.5, post_momentum=0.8, theta=0.5, epssq=0.0025,", "= float(perplexity) self.early_exaggeration = float(early_exaggeration) self.learning_rate = float(learning_rate) self.n_iter =", "ValueError('Invalid return style...') elif return_style == 'once': self.return_style = 0", "verbose=0, random_seed=None, use_interactive=False, viz_timeout=10000, viz_server=\"tcp://localhost:5556\", dump_points=False, dump_file=\"dump.txt\", dump_interval=1, print_interval=10, device=0,", "= N.ctypeslib.load_library('libfaiss', self._path) # Load the ctypes library # self._gpufaiss_lib", "Load the ctypes library # self._gpufaiss_lib = N.ctypeslib.load_library('libgpufaiss', self._path) #", "<NAME>, L.J.P. t-Distributed Stochastic Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html \"\"\" import numpy", "# pre-exaggeration momentum ctypes.c_float, # post-exaggeration momentum ctypes.c_float, # Theta", "ctypes.c_int, # Return style ctypes.c_int ] # Number of snapshots", "ctypes.c_int, # Num Neighbors ctypes.c_int, # Iterations ctypes.c_int, # Iterations", "'once': self.return_style = 0 elif return_style == 'snapshots': self.return_style =", "library self._path = pkg_resources.resource_filename('tsnecuda','') # Load from current location #", "# Return style ctypes.c_int ] # Number of snapshots def", "Load from current location # self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path) #", "Use interactive N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Viz Server ctypes.c_int, #", "# Load the ctypes library self._lib = N.ctypeslib.load_library('libtsnecuda', self._path) #", "use metric=\\'euclidean\\' for now.') else: self.metric = metric if init", "ctypes.c_bool(self.dump_points), # Dump points self.dump_file_, # Dump File ctypes.c_int(self.dump_interval), #", "ctypes.c_float(self.epssq), # epssq ctypes.c_float(self.min_grad_norm), # Minimum gradient norm ctypes.c_int(self.initialization_type), #", "Arguments: y {None} -- Ignored (default: {None}) \"\"\" # Setup", "N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE']) # Handle Initialization if y is", "self.embedding, # result self.points, # points self.points.ctypes.shape, # dims ctypes.c_float(self.perplexity),", "b.extend(map(ord, s)) return N.array([x for x in b] + [0]).astype(N.uint8)", "Number of snapshots def fit_transform(self, X, y=None): \"\"\"Fit X into", "progress ctypes.c_int(self.force_magnify_iters), # Force Magnify iterations ctypes.c_float(self.perplexity_epsilon), # Perplexity search", "2: 
raise ValueError('The current barnes-hut implementation does not support projection", "int(viz_timeout) # Return style if return_style not in ['once','snapshots']: raise", "bool(dump_points) self.dump_interval = int(dump_interval) # Viz self.use_interactive = bool(use_interactive) self.viz_server", "t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008. [2] <NAME>,", "Hook the BH T-SNE function self._lib.pymodule_bh_tsne.restype = None self._lib.pymodule_bh_tsne.argtypes =", "ctypes.c_float, # Learning Rate ctypes.c_float, # Magnitude Factor ctypes.c_int, #", "N.require(self.embedding , N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE']) # Handle Initialization if", "# Minimum gradient norm ctypes.c_int(self.initialization_type), # Initialization types self.init_data, #", "= float(perplexity_epsilon) self.pre_momentum = float(pre_momentum) self.post_momentum = float(post_momentum) self.theta =", "'snapshots': self.return_style = 1 self.num_snapshots = int(num_snapshots) # Build the", "References [1] <NAME>, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data Using", "TSNE(object): def __init__(self, n_components=2, perplexity=50.0, early_exaggeration=2.0, learning_rate=200.0, num_neighbors=1023, force_magnify_iters=250, pre_momentum=0.5,", "self.theta = float(theta) self.epssq =float(epssq) self.device = int(device) self.print_interval =", "y=None): \"\"\"Fit X into an embedded space and return that", "ctypes.c_int, # Dump interval ctypes.c_bool, # Use interactive N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED,", "self.init_data = N.require(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED']) # Handle dumping and", "Magnify iterations ctypes.c_float(self.perplexity_epsilon), # Perplexity search epsilon ctypes.c_float(self.pre_momentum), # pre-exaggeration", "use init=\\'random\\' for now.') else: self.init = init self.verbose =", "float(min_grad_norm) if metric not in ['euclidean']: raise ValueError('Non-Euclidean metrics are", "# Use interactive self.viz_server_, # Viz Server ctypes.c_int(self.viz_timeout), # Viz", "elif return_style == 'once': self.return_style = 0 elif return_style ==", "return_style='once', num_snapshots=5, verbose=0, random_seed=None, use_interactive=False, viz_timeout=10000, viz_server=\"tcp://localhost:5556\", dump_points=False, dump_file=\"dump.txt\", dump_interval=1,", "if self.n_components != 2: raise ValueError('The current barnes-hut implementation does", "ctypes.c_bool, # Use interactive N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Viz Server", "for the Barnes Hut TSNE algorithm with fast nearest neighbors", "Verbosity ctypes.c_int(self.print_interval), # Print interval ctypes.c_int(self.device), # GPU Device ctypes.c_int(self.return_style),", "# Hook the BH T-SNE function self._lib.pymodule_bh_tsne.restype = None self._lib.pymodule_bh_tsne.argtypes", "init self.verbose = int(verbose) # Initialize non-sklearn variables self.num_neighbors =", "# Learning Rate ctypes.c_float, # Magnitude Factor ctypes.c_int, # Num", "= init self.verbose = int(verbose) # Initialize non-sklearn variables self.num_neighbors", "# Dump File ctypes.c_int, # Dump interval ctypes.c_bool, # Use", "return_style == 'snapshots': self.return_style = 1 self.num_snapshots = int(num_snapshots) #", "# epssq ctypes.c_float(self.min_grad_norm), # Minimum gradient norm ctypes.c_int(self.initialization_type), # Initialization", "interval ctypes.c_int(self.device), # GPU Device ctypes.c_int(self.return_style), # Return style ctypes.c_int(self.num_snapshots)", "+ 
[0]).astype(N.uint8) class TSNE(object): def __init__(self, n_components=2, perplexity=50.0, early_exaggeration=2.0, learning_rate=200.0,", "# post-exaggeration momentum ctypes.c_float, # Theta ctypes.c_float, # epssq ctypes.c_float,", "self._path) # Load the ctypes library # Hook the BH", "early_exaggeration=2.0, learning_rate=200.0, num_neighbors=1023, force_magnify_iters=250, pre_momentum=0.5, post_momentum=0.8, theta=0.5, epssq=0.0025, n_iter=1000, n_iter_without_progress=1000,", "ctypes library # Hook the BH T-SNE function self._lib.pymodule_bh_tsne.restype =", "Handle Initialization if y is None: self.initialization_type = 1 self.init_data", "Perplexity ctypes.c_float(self.learning_rate), # Learning Rate ctypes.c_float(self.early_exaggeration), # Magnitude Factor ctypes.c_int(self.num_neighbors),", "epssq ctypes.c_float(self.min_grad_norm), # Minimum gradient norm ctypes.c_int(self.initialization_type), # Initialization types", "self.init_data, # Initialization Data ctypes.c_bool(self.dump_points), # Dump points self.dump_file_, #", "the ctypes library # Hook the BH T-SNE function self._lib.pymodule_bh_tsne.restype", "min_grad_norm=1e-7, perplexity_epsilon=1e-3, metric='euclidean', init='random', return_style='once', num_snapshots=5, verbose=0, random_seed=None, use_interactive=False, viz_timeout=10000,", "Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html \"\"\" import numpy as N import ctypes", "Please use metric=\\'euclidean\\' for now.') else: self.metric = metric if", "['CONTIGUOUS', 'ALIGNED']) self.embedding = N.zeros(shape=(X.shape[0],self.n_components)) self.embedding = N.require(self.embedding , N.float32,", "types N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS'), # Initialization Data ctypes.c_bool, #", "# points ctypes.POINTER(N.ctypeslib.c_intp), # dims ctypes.c_float, # Perplexity ctypes.c_float, #", "dump_interval=1, print_interval=10, device=0, ): \"\"\"Initialization method for barnes hut T-SNE", "def fit_transform(self, X, y=None): \"\"\"Fit X into an embedded space", "self._path) # Load the ctypes library self._lib = N.ctypeslib.load_library('libtsnecuda', self._path)", "# Load from current location # self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path)", "hut T-SNE class. \"\"\" # Initialize the variables self.n_components =", "self.return_style = 0 elif return_style == 'snapshots': self.return_style = 1", "# Num Neighbors ctypes.c_int, # Iterations ctypes.c_int, # Iterations no", "momentum ctypes.c_float(self.theta), # Theta ctypes.c_float(self.epssq), # epssq ctypes.c_float(self.min_grad_norm), # Minimum", "Arguments: X {array} -- Input array, shape: (n_points, n_dimensions) Keyword", "Num Neighbors ctypes.c_int(self.n_iter), # Iterations ctypes.c_int(self.n_iter_without_progress), # Iterations no progress", "GPU Device ctypes.c_int(self.return_style), # Return style ctypes.c_int(self.num_snapshots) ) # Number", "of Machine Learning Research 9:2579-2605, 2008. [2] <NAME>, L.J.P. 
t-Distributed", "= 1 self.num_snapshots = int(num_snapshots) # Build the hooks for", "# Handle Initialization if y is None: self.initialization_type = 1", "library self._lib = N.ctypeslib.load_library('libtsnecuda', self._path) # Load the ctypes library", "Viz self.use_interactive = bool(use_interactive) self.viz_server = str(viz_server) self.viz_timeout = int(viz_timeout)", "Perplexity ctypes.c_float, # Learning Rate ctypes.c_float, # Magnitude Factor ctypes.c_int,", "x in b] + [0]).astype(N.uint8) class TSNE(object): def __init__(self, n_components=2,", "self.initialization_type = 1 self.init_data = N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED']) else: self.initialization_type = 3", "= int(n_iter) self.n_iter_without_progress = int(n_iter_without_progress) self.min_grad_norm = float(min_grad_norm) if metric", "N.float32, ['F_CONTIGUOUS', 'ALIGNED']) # Handle dumping and viz strings self.dump_file_", "and viz strings self.dump_file_ = N.require(ord_string(self.dump_file), N.uint8, ['CONTIGUOUS', 'ALIGNED']) self.viz_server_", "# result N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS'), # points ctypes.POINTER(N.ctypeslib.c_intp), #", "== 'snapshots': self.return_style = 1 self.num_snapshots = int(num_snapshots) # Build", "float(perplexity_epsilon) self.pre_momentum = float(pre_momentum) self.post_momentum = float(post_momentum) self.theta = float(theta)", "= bool(dump_points) self.dump_interval = int(dump_interval) # Viz self.use_interactive = bool(use_interactive)", "self.min_grad_norm = float(min_grad_norm) if metric not in ['euclidean']: raise ValueError('Non-Euclidean", "Hut TSNE algorithm with fast nearest neighbors Refs: References [1]", "that transformed output. Arguments: X {array} -- Input array, shape:", "raise ValueError('Invalid return style...') elif return_style == 'once': self.return_style =", "Minimum gradient norm ctypes.c_int, # Initialization types N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED,", "Force Magnify iterations ctypes.c_float(self.perplexity_epsilon), # Perplexity search epsilon ctypes.c_float(self.pre_momentum), #", "ctypes.c_int(self.dump_interval), # Dump interval ctypes.c_bool(self.use_interactive), # Use interactive self.viz_server_, #", "ctypes.c_int, # GPU Device ctypes.c_int, # Return style ctypes.c_int ]", "{None} -- Ignored (default: {None}) \"\"\" # Setup points/embedding requirements", "# points self.points.ctypes.shape, # dims ctypes.c_float(self.perplexity), # Perplexity ctypes.c_float(self.learning_rate), #", "is None: self.initialization_type = 1 self.init_data = N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED']) else: self.initialization_type", "the Barnes Hut TSNE algorithm with fast nearest neighbors Refs:", "= float(early_exaggeration) self.learning_rate = float(learning_rate) self.n_iter = int(n_iter) self.n_iter_without_progress =", "float(learning_rate) self.n_iter = int(n_iter) self.n_iter_without_progress = int(n_iter_without_progress) self.min_grad_norm = float(min_grad_norm)", "library # self._gpufaiss_lib = N.ctypeslib.load_library('libgpufaiss', self._path) # Load the ctypes", "self._lib.pymodule_bh_tsne.argtypes = [ N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS, WRITEABLE'), # result", "ctypes.c_int(self.n_iter), # Iterations ctypes.c_int(self.n_iter_without_progress), # Iterations no progress ctypes.c_int(self.force_magnify_iters), #", "projection into dimensions other than 2 for now.') self.perplexity =", "# Viz Server ctypes.c_int(self.viz_timeout), # 
Viz timeout ctypes.c_int(self.verbose), # Verbosity", "self.num_neighbors = int(num_neighbors) self.force_magnify_iters = int(force_magnify_iters) self.perplexity_epsilon = float(perplexity_epsilon) self.pre_momentum", "['F_CONTIGUOUS', 'ALIGNED']) # Handle dumping and viz strings self.dump_file_ =", "self.use_interactive = bool(use_interactive) self.viz_server = str(viz_server) self.viz_timeout = int(viz_timeout) #", "# Number of snapshots def fit_transform(self, X, y=None): \"\"\"Fit X", "# Setup points/embedding requirements self.points = N.require(X, N.float32, ['CONTIGUOUS', 'ALIGNED'])", "# dims ctypes.c_float, # Perplexity ctypes.c_float, # Learning Rate ctypes.c_float,", "TSNE algorithm with fast nearest neighbors Refs: References [1] <NAME>,", "s)) return N.array([x for x in b] + [0]).astype(N.uint8) class", "ctypes.c_float, # Theta ctypes.c_float, # epssq ctypes.c_float, # Minimum gradient", "theta=0.5, epssq=0.0025, n_iter=1000, n_iter_without_progress=1000, min_grad_norm=1e-7, perplexity_epsilon=1e-3, metric='euclidean', init='random', return_style='once', num_snapshots=5,", "Neighbors ctypes.c_int(self.n_iter), # Iterations ctypes.c_int(self.n_iter_without_progress), # Iterations no progress ctypes.c_int(self.force_magnify_iters),", "ctypes.c_int(self.force_magnify_iters), # Force Magnify iterations ctypes.c_float(self.perplexity_epsilon), # Perplexity search epsilon", "interval ctypes.c_bool, # Use interactive N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Viz", "print_interval=10, device=0, ): \"\"\"Initialization method for barnes hut T-SNE class.", "now.') else: self.metric = metric if init not in ['random']:", "=float(epssq) self.device = int(device) self.print_interval = int(print_interval) # Point dumpoing", "CONTIGUOUS'), # points ctypes.POINTER(N.ctypeslib.c_intp), # dims ctypes.c_float, # Perplexity ctypes.c_float,", "# Perplexity ctypes.c_float(self.learning_rate), # Learning Rate ctypes.c_float(self.early_exaggeration), # Magnitude Factor", "Device ctypes.c_int(self.return_style), # Return style ctypes.c_int(self.num_snapshots) ) # Number of", "if return_style not in ['once','snapshots']: raise ValueError('Invalid return style...') elif", "http://homepage.tudelft.nl/19j49/t-SNE.html \"\"\" import numpy as N import ctypes import os", "num_snapshots=5, verbose=0, random_seed=None, use_interactive=False, viz_timeout=10000, viz_server=\"tcp://localhost:5556\", dump_points=False, dump_file=\"dump.txt\", dump_interval=1, print_interval=10,", "= float(post_momentum) self.theta = float(theta) self.epssq =float(epssq) self.device = int(device)", "for now.') else: self.metric = metric if init not in", "# Num Neighbors ctypes.c_int(self.n_iter), # Iterations ctypes.c_int(self.n_iter_without_progress), # Iterations no", "self.n_iter = int(n_iter) self.n_iter_without_progress = int(n_iter_without_progress) self.min_grad_norm = float(min_grad_norm) if", "Factor ctypes.c_int, # Num Neighbors ctypes.c_int, # Iterations ctypes.c_int, #", "non-sklearn variables self.num_neighbors = int(num_neighbors) self.force_magnify_iters = int(force_magnify_iters) self.perplexity_epsilon =", "self.device = int(device) self.print_interval = int(print_interval) # Point dumpoing self.dump_file", "1 self.num_snapshots = int(num_snapshots) # Build the hooks for the", "[ N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS, WRITEABLE'), # result N.ctypeslib.ndpointer(N.float32, ndim=2,", "Journal of Machine Learning Research 9:2579-2605, 2008. 
[2] <NAME>, L.J.P.", "= N.require(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED']) # Handle dumping and viz", "self.initialization_type = 3 self.init_data = N.require(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED']) #", "float(early_exaggeration) self.learning_rate = float(learning_rate) self.n_iter = int(n_iter) self.n_iter_without_progress = int(n_iter_without_progress)", "t-Distributed Stochastic Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html \"\"\" import numpy as N", "dump_file=\"dump.txt\", dump_interval=1, print_interval=10, device=0, ): \"\"\"Initialization method for barnes hut", "not in ['once','snapshots']: raise ValueError('Invalid return style...') elif return_style ==", "the ctypes library # self._gpufaiss_lib = N.ctypeslib.load_library('libgpufaiss', self._path) # Load", "= int(n_iter_without_progress) self.min_grad_norm = float(min_grad_norm) if metric not in ['euclidean']:", "progress ctypes.c_int, # Force Magnify iterations ctypes.c_float, # Perplexity search", "Learning Research 9:2579-2605, 2008. [2] <NAME>, L.J.P. t-Distributed Stochastic Neighbor", "Initialization if y is None: self.initialization_type = 1 self.init_data =", "= N.zeros(shape=(X.shape[0],self.n_components)) self.embedding = N.require(self.embedding , N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE'])", "ctypes.c_int, # Iterations no progress ctypes.c_int, # Force Magnify iterations", "ndim=2, flags='ALIGNED, F_CONTIGUOUS'), # Initialization Data ctypes.c_bool, # Dump points", "= None self._lib.pymodule_bh_tsne.argtypes = [ N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS, WRITEABLE'),", "\"\"\" import numpy as N import ctypes import os import", "# Point dumpoing self.dump_file = str(dump_file) self.dump_points = bool(dump_points) self.dump_interval", "= N.ctypeslib.load_library('libgpufaiss', self._path) # Load the ctypes library self._lib =", "array, shape: (n_points, n_dimensions) Keyword Arguments: y {None} -- Ignored", "Num Neighbors ctypes.c_int, # Iterations ctypes.c_int, # Iterations no progress", "ctypes.c_bool, # Dump points N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Dump File", "barnes hut T-SNE class. \"\"\" # Initialize the variables self.n_components", "High-Dimensional Data Using t-SNE. Journal of Machine Learning Research 9:2579-2605,", "ctypes.c_float(self.pre_momentum), # pre-exaggeration momentum ctypes.c_float(self.post_momentum), # post-exaggeration momentum ctypes.c_float(self.theta), #", "momentum ctypes.c_float(self.post_momentum), # post-exaggeration momentum ctypes.c_float(self.theta), # Theta ctypes.c_float(self.epssq), #", "ndim=2, flags='ALIGNED, CONTIGUOUS'), # points ctypes.POINTER(N.ctypeslib.c_intp), # dims ctypes.c_float, #", "N.uint8, ['CONTIGUOUS', 'ALIGNED']) self.viz_server_ = N.require(ord_string(self.viz_server), N.uint8, ['CONTIGUOUS', 'ALIGNED']) self._lib.pymodule_bh_tsne(", "{None}) \"\"\" # Setup points/embedding requirements self.points = N.require(X, N.float32,", "in b] + [0]).astype(N.uint8) class TSNE(object): def __init__(self, n_components=2, perplexity=50.0,", "not currently supported. 
Please use init=\\'random\\' for now.') else: self.init", "= bytearray() arr = b.extend(map(ord, s)) return N.array([x for x", "# Theta ctypes.c_float(self.epssq), # epssq ctypes.c_float(self.min_grad_norm), # Minimum gradient norm", "arr = b.extend(map(ord, s)) return N.array([x for x in b]", "fit_transform(self, X, y=None): \"\"\"Fit X into an embedded space and", "'ALIGNED']) self.embedding = N.zeros(shape=(X.shape[0],self.n_components)) self.embedding = N.require(self.embedding , N.float32, ['F_CONTIGUOUS',", "Use interactive self.viz_server_, # Viz Server ctypes.c_int(self.viz_timeout), # Viz timeout", "as N import ctypes import os import pkg_resources def ord_string(s):", "float(theta) self.epssq =float(epssq) self.device = int(device) self.print_interval = int(print_interval) #", "# Handle dumping and viz strings self.dump_file_ = N.require(ord_string(self.dump_file), N.uint8,", "init='random', return_style='once', num_snapshots=5, verbose=0, random_seed=None, use_interactive=False, viz_timeout=10000, viz_server=\"tcp://localhost:5556\", dump_points=False, dump_file=\"dump.txt\",", "ctypes.c_float, # Perplexity search epsilon ctypes.c_float, # pre-exaggeration momentum ctypes.c_float,", "result self.points, # points self.points.ctypes.shape, # dims ctypes.c_float(self.perplexity), # Perplexity", "# Minimum gradient norm ctypes.c_int, # Initialization types N.ctypeslib.ndpointer(N.float32, ndim=2,", "def __init__(self, n_components=2, perplexity=50.0, early_exaggeration=2.0, learning_rate=200.0, num_neighbors=1023, force_magnify_iters=250, pre_momentum=0.5, post_momentum=0.8,", "dumpoing self.dump_file = str(dump_file) self.dump_points = bool(dump_points) self.dump_interval = int(dump_interval)", "now.') else: self.init = init self.verbose = int(verbose) # Initialize", "# Print interval ctypes.c_int, # GPU Device ctypes.c_int, # Return", "self.num_snapshots = int(num_snapshots) # Build the hooks for the BH", "self.epssq =float(epssq) self.device = int(device) self.print_interval = int(print_interval) # Point", "hooks for the BH T-SNE library self._path = pkg_resources.resource_filename('tsnecuda','') #", "self.force_magnify_iters = int(force_magnify_iters) self.perplexity_epsilon = float(perplexity_epsilon) self.pre_momentum = float(pre_momentum) self.post_momentum", "self.perplexity = float(perplexity) self.early_exaggeration = float(early_exaggeration) self.learning_rate = float(learning_rate) self.n_iter", "metric not in ['euclidean']: raise ValueError('Non-Euclidean metrics are not currently", "Learning Rate ctypes.c_float(self.early_exaggeration), # Magnitude Factor ctypes.c_int(self.num_neighbors), # Num Neighbors", "style if return_style not in ['once','snapshots']: raise ValueError('Invalid return style...')", "= int(dump_interval) # Viz self.use_interactive = bool(use_interactive) self.viz_server = str(viz_server)", "L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data Using t-SNE. 
Journal of", "0 elif return_style == 'snapshots': self.return_style = 1 self.num_snapshots =", "metric if init not in ['random']: raise ValueError('Non-Random initialization is", "ord_string(s): b = bytearray() arr = b.extend(map(ord, s)) return N.array([x", "# GPU Device ctypes.c_int, # Return style ctypes.c_int ] #", "from current location # self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path) # Load", "post-exaggeration momentum ctypes.c_float, # Theta ctypes.c_float, # epssq ctypes.c_float, #", "ctypes.c_int, # Initialization types N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS'), # Initialization", "): \"\"\"Initialization method for barnes hut T-SNE class. \"\"\" #", "float(post_momentum) self.theta = float(theta) self.epssq =float(epssq) self.device = int(device) self.print_interval", "ctypes.c_int(self.return_style), # Return style ctypes.c_int(self.num_snapshots) ) # Number of snapshots", "self.dump_file_, # Dump File ctypes.c_int(self.dump_interval), # Dump interval ctypes.c_bool(self.use_interactive), #", "1 self.init_data = N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED']) else: self.initialization_type = 3 self.init_data =", "# post-exaggeration momentum ctypes.c_float(self.theta), # Theta ctypes.c_float(self.epssq), # epssq ctypes.c_float(self.min_grad_norm),", "y is None: self.initialization_type = 1 self.init_data = N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED']) else:", "embedded space and return that transformed output. Arguments: X {array}", "n_iter_without_progress=1000, min_grad_norm=1e-7, perplexity_epsilon=1e-3, metric='euclidean', init='random', return_style='once', num_snapshots=5, verbose=0, random_seed=None, use_interactive=False,", "epssq ctypes.c_float, # Minimum gradient norm ctypes.c_int, # Initialization types", "# Verbosity ctypes.c_int, # Print interval ctypes.c_int, # GPU Device", "not currently supported. 
Please use metric=\\'euclidean\\' for now.') else: self.metric", "Verbosity ctypes.c_int, # Print interval ctypes.c_int, # GPU Device ctypes.c_int,", "'ALIGNED']) # Handle dumping and viz strings self.dump_file_ = N.require(ord_string(self.dump_file),", "(n_points, n_dimensions) Keyword Arguments: y {None} -- Ignored (default: {None})", "N import ctypes import os import pkg_resources def ord_string(s): b", "norm ctypes.c_int, # Initialization types N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS'), #", "T-SNE library self._path = pkg_resources.resource_filename('tsnecuda','') # Load from current location", "search epsilon ctypes.c_float, # pre-exaggeration momentum ctypes.c_float, # post-exaggeration momentum", "Return style if return_style not in ['once','snapshots']: raise ValueError('Invalid return", "Server ctypes.c_int(self.viz_timeout), # Viz timeout ctypes.c_int(self.verbose), # Verbosity ctypes.c_int(self.print_interval), #", "self.pre_momentum = float(pre_momentum) self.post_momentum = float(post_momentum) self.theta = float(theta) self.epssq", "self.points.ctypes.shape, # dims ctypes.c_float(self.perplexity), # Perplexity ctypes.c_float(self.learning_rate), # Learning Rate", "# Force Magnify iterations ctypes.c_float, # Perplexity search epsilon ctypes.c_float,", "into dimensions other than 2 for now.') self.perplexity = float(perplexity)", "points ctypes.POINTER(N.ctypeslib.c_intp), # dims ctypes.c_float, # Perplexity ctypes.c_float, # Learning", "the BH T-SNE library self._path = pkg_resources.resource_filename('tsnecuda','') # Load from", "import os import pkg_resources def ord_string(s): b = bytearray() arr", "GPU Device ctypes.c_int, # Return style ctypes.c_int ] # Number", "# Viz timeout ctypes.c_int, # Verbosity ctypes.c_int, # Print interval", "None: self.initialization_type = 1 self.init_data = N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED']) else: self.initialization_type =", "self.metric = metric if init not in ['random']: raise ValueError('Non-Random", "device=0, ): \"\"\"Initialization method for barnes hut T-SNE class. \"\"\"", "\"\"\" # Initialize the variables self.n_components = int(n_components) if self.n_components", "int(dump_interval) # Viz self.use_interactive = bool(use_interactive) self.viz_server = str(viz_server) self.viz_timeout", "int(n_iter_without_progress) self.min_grad_norm = float(min_grad_norm) if metric not in ['euclidean']: raise", "# Initialize non-sklearn variables self.num_neighbors = int(num_neighbors) self.force_magnify_iters = int(force_magnify_iters)", "return that transformed output. Arguments: X {array} -- Input array,", "search epsilon ctypes.c_float(self.pre_momentum), # pre-exaggeration momentum ctypes.c_float(self.post_momentum), # post-exaggeration momentum", "ctypes.c_int(self.verbose), # Verbosity ctypes.c_int(self.print_interval), # Print interval ctypes.c_int(self.device), # GPU", "ValueError('Non-Random initialization is not currently supported. Please use init=\\'random\\' for", "viz_server=\"tcp://localhost:5556\", dump_points=False, dump_file=\"dump.txt\", dump_interval=1, print_interval=10, device=0, ): \"\"\"Initialization method for", "Load the ctypes library self._lib = N.ctypeslib.load_library('libtsnecuda', self._path) # Load", "else: self.init = init self.verbose = int(verbose) # Initialize non-sklearn", "N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS'), # Initialization Data ctypes.c_bool, # Dump", "Hinton, G.E. Visualizing High-Dimensional Data Using t-SNE. 
Journal of Machine", "dimensions other than 2 for now.') self.perplexity = float(perplexity) self.early_exaggeration", "T-SNE class. \"\"\" # Initialize the variables self.n_components = int(n_components)", "and return that transformed output. Arguments: X {array} -- Input", "requirements self.points = N.require(X, N.float32, ['CONTIGUOUS', 'ALIGNED']) self.embedding = N.zeros(shape=(X.shape[0],self.n_components))", "ctypes.c_float(self.post_momentum), # post-exaggeration momentum ctypes.c_float(self.theta), # Theta ctypes.c_float(self.epssq), # epssq", "iterations ctypes.c_float(self.perplexity_epsilon), # Perplexity search epsilon ctypes.c_float(self.pre_momentum), # pre-exaggeration momentum", "= int(print_interval) # Point dumpoing self.dump_file = str(dump_file) self.dump_points =", "dims ctypes.c_float, # Perplexity ctypes.c_float, # Learning Rate ctypes.c_float, #", "elif return_style == 'snapshots': self.return_style = 1 self.num_snapshots = int(num_snapshots)", "function self._lib.pymodule_bh_tsne.restype = None self._lib.pymodule_bh_tsne.argtypes = [ N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED,", "than 2 for now.') self.perplexity = float(perplexity) self.early_exaggeration = float(early_exaggeration)", "# epssq ctypes.c_float, # Minimum gradient norm ctypes.c_int, # Initialization", "ctypes.c_float(self.perplexity), # Perplexity ctypes.c_float(self.learning_rate), # Learning Rate ctypes.c_float(self.early_exaggeration), # Magnitude", "support projection into dimensions other than 2 for now.') self.perplexity", "= int(n_components) if self.n_components != 2: raise ValueError('The current barnes-hut", "Server ctypes.c_int, # Viz timeout ctypes.c_int, # Verbosity ctypes.c_int, #", "float(pre_momentum) self.post_momentum = float(post_momentum) self.theta = float(theta) self.epssq =float(epssq) self.device", "Please use init=\\'random\\' for now.') else: self.init = init self.verbose", "ctypes import os import pkg_resources def ord_string(s): b = bytearray()", "= bool(use_interactive) self.viz_server = str(viz_server) self.viz_timeout = int(viz_timeout) # Return", "if init not in ['random']: raise ValueError('Non-Random initialization is not", "Iterations ctypes.c_int, # Iterations no progress ctypes.c_int, # Force Magnify", "if metric not in ['euclidean']: raise ValueError('Non-Euclidean metrics are not", "self.embedding = N.require(self.embedding , N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE']) # Handle", "in ['euclidean']: raise ValueError('Non-Euclidean metrics are not currently supported. Please", "perplexity=50.0, early_exaggeration=2.0, learning_rate=200.0, num_neighbors=1023, force_magnify_iters=250, pre_momentum=0.5, post_momentum=0.8, theta=0.5, epssq=0.0025, n_iter=1000,", "class TSNE(object): def __init__(self, n_components=2, perplexity=50.0, early_exaggeration=2.0, learning_rate=200.0, num_neighbors=1023, force_magnify_iters=250,", "neighbors Refs: References [1] <NAME>, L.J.P.; Hinton, G.E. 
[n-gram row: overlapping fragments of the tsnecuda Python wrapper (ctypes bindings for GPU Barnes-Hut t-SNE). Recoverable content follows.]

Module docstring: "Barnes Hut TSNE algorithm with fast nearest neighbors. Refs: [1] <NAME>, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008. [2] <NAME>, L.J.P. t-Distributed Stochastic Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html". The module imports numpy as N, ctypes, os and pkg_resources.

ord_string(s): builds a bytearray from the character codes of s and returns it as a NUL-terminated numpy uint8 array, so that strings (the dump file name, the viz server address) can be handed to the C library.

class TSNE(object): __init__ ("Initialization method for barnes hut T-SNE class.") validates its arguments: n_components must be 2 ("The current barnes-hut implementation does not support projection into dimensions other than 2 for now."), metric must be 'euclidean', init must be 'random', and return_style must be 'once' (return_style = 0) or 'snapshots' (return_style = 1, with num_snapshots). It stores perplexity, early_exaggeration, learning_rate, n_iter, n_iter_without_progress, min_grad_norm, perplexity_epsilon, the pre/post exaggeration momenta, theta, epssq, num_neighbors, device, print/dump intervals, dump_points/dump_file and the interactive-viz settings (use_interactive, viz_server, viz_timeout). It then loads libtsnecuda via numpy.ctypeslib.load_library from the pkg_resources package location (the libfaiss/libgpufaiss loads are commented out) and declares the argtypes of the pymodule_bh_tsne entry point: aligned/contiguous float32 ndpointers for the result, the input points and the initialization data, a POINTER(c_intp) for the dims, NUL-terminated uint8 arrays for the dump file and viz server strings, and scalar c_float/c_int/c_bool values for the remaining options.

fit_transform(X, y=None) ("Fit X into an embedded space and return that transformed output. Arguments: X {array} -- Input array, shape: (n_points, n_dimensions). Keyword Arguments: y {None} -- Ignored (default: {None})"): copies X into a contiguous, aligned float32 array, allocates a writable (n_points, n_components) embedding, sets the initialization type (random when y is None, otherwise type 3 with y as the initialization data), encodes dump_file and viz_server with ord_string, calls pymodule_bh_tsne with all of the configured options and returns self.embedding.
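The fragments above all revolve around one ctypes pattern, so a compact illustration may help. The sketch below is not the tsnecuda code: the library name libexample_tsne, its location and the three-option run_bh_tsne entry point are hypothetical stand-ins (the real wrapper loads libtsnecuda and hooks pymodule_bh_tsne, whose argument list is far longer). It only shows how numpy.ctypeslib.load_library, ndpointer argtypes and numpy.require combine so float32 arrays can be passed safely to a C/CUDA routine.

# Sketch of the ctypes pattern used by the wrapper above; every name marked
# "hypothetical" is an illustration, not part of the real tsnecuda API.
import ctypes
import numpy as N

def ord_string(s):
    # Same idea as the helper in the fragments: encode a Python string as a
    # NUL-terminated uint8 array that the C side can read as a char*.
    b = bytearray()
    b.extend(map(ord, s))
    return N.array([x for x in b] + [0]).astype(N.uint8)

# Hypothetical shared library and entry point (stand-ins for libtsnecuda /
# pymodule_bh_tsne). Assumes the library exists at this path and exports run_bh_tsne.
lib = N.ctypeslib.load_library('libexample_tsne', '/path/to/lib')
lib.run_bh_tsne.restype = None
lib.run_bh_tsne.argtypes = [
    N.ctypeslib.ndpointer(N.float32, ndim=2,
                          flags='ALIGNED, CONTIGUOUS, WRITEABLE'),  # result (written by C)
    N.ctypeslib.ndpointer(N.float32, ndim=2,
                          flags='ALIGNED, CONTIGUOUS'),             # input points
    ctypes.POINTER(N.ctypeslib.c_intp),                             # dims of the input
    ctypes.c_float,                                                  # perplexity
]

# Arrays must satisfy the declared flags, hence numpy.require as in the fragments.
points = N.require(N.random.rand(1000, 50), N.float32, ['CONTIGUOUS', 'ALIGNED'])
embedding = N.require(N.zeros((points.shape[0], 2)), N.float32,
                      ['CONTIGUOUS', 'ALIGNED', 'WRITEABLE'])
lib.run_bh_tsne(embedding, points, points.ctypes.shape, ctypes.c_float(30.0))

Declaring argtypes once up front lets ctypes validate dtype, dimensionality and memory layout on every call, instead of silently handing a misaligned or non-contiguous buffer to the GPU code.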
[ "strictly editing users info and courses info from .views import", "not related to strictly editing users info and courses info", "# contains any CRUD not related to strictly editing users", "to strictly editing users info and courses info from .views", "contains any CRUD not related to strictly editing users info", "related to strictly editing users info and courses info from", "CRUD not related to strictly editing users info and courses", "editing users info and courses info from .views import admin", "any CRUD not related to strictly editing users info and" ]
[ "object() def exists(file): if os.path.exists(file): return file ind = file.find('.zip')", "for x in paths_from_eclipse_to_python])) translated = translated_proper_case if eclipse_sep !=", "File f = File(filename) ret = f.getCanonicalPath() if IS_PY2 and", "normcase(r) return r _ZIP_SEARCH_CACHE = {} _NOT_FOUND_SENTINEL = object() def", "like build/bdist.linux-x86_64/egg/<path-inside-egg> f = frame.f_globals['__file__'] if get_abs_path_real_path_and_base_from_file is None: #", "sys.stderr.write( 'pydev debugger: _NormFile changed path (from: %s to %s)\\n'", "% \\ (translated, [x[0] for x in paths_from_eclipse_to_python])) # Note", "f = frame.f_globals['__file__'] if get_abs_path_real_path_and_base_from_file is None: # Interpreter shutdown", "DWORD] GetLongPathName.restype = DWORD GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW GetShortPathName.argtypes = [LPCWSTR,", "abs_path, real_path = initial_norm_paths(join(path, filename)) if exists(real_path): break else: sys.stderr.write('pydev", "Found: %s (%s)' % (filename, type(filename))) abs_path = _NormPath(filename, os.path.abspath)", "r[ind:] if inner_path.startswith('!'): # Note (fabioz): although I can replicate", "def _AbsFile(filename): abs_path, real_path = _NormPaths(filename) return abs_path # Returns", "try: return NORM_PATHS_CONTAINER[filename] except KeyError: if filename.__class__ != str: raise", "get it with the path with # the real case", "path from the client to the debug server translated =", "= _NormFile(filename) # After getting the real path, let's get", "c:\\my_project\\src\\package\\module1.py the PATHS_FROM_ECLIPSE_TO_PYTHON would have to be: PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\\my_project\\src',", "exists(real_path): break else: sys.stderr.write('pydev debugger: Unable to find real location", "translation functions if absolutely needed! def _norm_file_to_server(filename, cache=norm_filename_to_server_container): # Eclipse", "any of those names to a given scope. def _original_file_to_client(filename,", "eclipse installation) import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend) see parameter", "_original_file_to_client(filename, cache={}): try: return cache[filename] except KeyError: cache[filename] = get_path_with_real_case(_AbsFile(filename))", "E.g.: If the server (your python process) has the structure", "(re)setup how the client <-> server translation works to provide", "to %s)\\n' % ( translated_proper_case, translated)) for i, (eclipse_prefix, python_prefix)", "as PATHS_FROM_ECLIPSE_TO_PYTHON''' global norm_file_to_client global norm_file_to_server global _last_client_server_paths_set _last_client_server_paths_set =", "filled with the appropriate paths. 
@note: in this context, the", "filenames so that we can be sure that: - The", "except: # Something didn't quite work out, leave no-op conversions", "if isinstance(path1, unicode): path1 = path1.encode(sys.getfilesystemencoding()) path0 = _fix_path(path0, eclipse_sep)", "GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW GetShortPathName.argtypes = [LPCWSTR, LPWSTR, DWORD] GetShortPathName.restype =", "# example: # PATHS_FROM_ECLIPSE_TO_PYTHON = [ # (r'd:\\temp\\temp_workspace_2\\test_python\\src\\yyy\\yyy', # r'd:\\temp\\temp_workspace_2\\test_python\\src\\hhh\\xxx')", "client-server translation) f = frame.f_code.co_filename if f is not None", "let's get it with the path with # the real", "= {} _NOT_FOUND_SENTINEL = object() def exists(file): if os.path.exists(file): return", "real_path = _NormPaths(filename) return real_path def _AbsFile(filename): abs_path, real_path =", "translated = translated.replace(eclipse_sep, python_sep) translated = _NormFile(translated) cache[filename] = translated", "is important! Note that this can be tricky to get", "actually go on and check if we can find it", "get_package_dir(mod_name): for path in sys.path: mod_path = join(path, mod_name.replace('.', '/'))", "won't be found in the server) @note: to enable remote", "line: set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\\my_project\\src','/user/projects/my_project/src']] @note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True", "os preferences. for i, (path0, path1) in enumerate(paths_from_eclipse_to_python[:]): if IS_PY2:", "'''paths is the same format as PATHS_FROM_ECLIPSE_TO_PYTHON''' global norm_file_to_client global", "%s\\n' % (filename,)) abs_path = filename real_path = filename NORM_SEARCH_CACHE[filename]", "assert os in ('WINDOWS', 'UNIX') if prev != os: _ide_os", "the client machine # and the 2nd element is the", "have to be: PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\\my_project\\src', r'/user/projects/my_project/src')] alternatively, this can", "_NormPaths(f) base = basename(real_path) ret = abs_path, real_path, base NORM_PATHS_AND_BASE_CONTAINER[f]", "actually works _get_path_with_real_case(__file__) except: # Something didn't quite work out,", "# pydevd_file_utils.norm_file_to_server # # instead of importing any of those", "breakpoints must be translated (otherwise they won't be found in", "zip_path + '!' if inner_path.startswith('/') or inner_path.startswith('\\\\'): inner_path = inner_path[1:]", "obtain a new normalized copy, just in case # the", "= _NormPaths(f) base = basename(real_path) ret = abs_path, real_path, base", "if loader is not None: for attr in (\"get_filename\", \"_get_filename\"):", "''' global _ide_os prev = _ide_os if os == 'WIN':", "names generated... 
try: try: code = rPath.func_code except AttributeError: code", "ind = file.find('.egg') if ind != -1: ind += 4", "ctypes.windll.kernel32.GetShortPathNameW GetShortPathName.argtypes = [LPCWSTR, LPWSTR, DWORD] GetShortPathName.restype = DWORD def", "path conversions when doing a remote debugging session in one", "os.path.exists join = os.path.join try: rPath = os.path.realpath # @UndefinedVariable", "IS_PY2: if isinstance(path0, unicode): path0 = path0.encode(sys.getfilesystemencoding()) if isinstance(path1, unicode):", "%s in %s\\n' % \\ (translated, [x[1] for x in", "_pydev_pkgutil_old as pkgutil try: loader = pkgutil.get_loader(mod_name) except: return None", "= _NormFile(translated) cache[filename] = translated return translated def _norm_file_to_client(filename, cache=norm_filename_to_client_container):", "':' and ret[0].islower(): return ret[0].upper() + ret[1:] return ret #", "eclipse_sep) path1 = _fix_path(path1, python_sep) initial_paths[i] = (path0, path1) paths_from_eclipse_to_python[i]", "replacing to client: %s\\n' % (translated,)) # Note: use the", "and then use: # # pydevd_file_utils.norm_file_to_client # pydevd_file_utils.norm_file_to_server # #", "= file.find('.egg') if ind != -1: ind += 4 zip_path", "do a quick test to see if we're working with", "not exists(real_path): # We must actually go on and check", "base = f, f, f else: abs_path, real_path = _NormPaths(f)", "(otherwise breakpoints won't be hit). - Providing means for the", "f is not None: if f.endswith('.pyc'): f = f[:-1] elif", "cannot do a _NormFile here, # only at the beginning", "if IS_WINDOWS: if IS_JYTHON: def normcase(filename): return filename.lower() else: def", "= _norm_file_to_client setup_client_server_paths(PATHS_FROM_ECLIPSE_TO_PYTHON) def _is_int(filename): # isdigit() doesn't support negative", "= path1.encode(sys.getfilesystemencoding()) path0 = _fix_path(path0, eclipse_sep) path1 = _fix_path(path1, python_sep)", "through the PYTHONPATH (and the PATHS_FROM_ECLIPSE_TO_PYTHON only needs to be", "all the paths with breakpoints must be translated (otherwise they", "def normcase(filename): return filename # no-op _ide_os = 'WINDOWS' if", "KeyError: if filename.__class__ != str: raise AssertionError('Paths passed to _NormPaths", "The debugger may still function, but it will work slower", "running and the client is where eclipse is running. E.g.:", "systems without islink support rPath = os.path.abspath # defined as", "then obtain a new normalized copy, just in case #", "# files from eggs in Python 2.7 have paths like", "- Providing means for the user to make path conversions", "not absolute)\\n') sys.stderr.write('pydev debugger: The debugger may still function, but", "then use: # # pydevd_file_utils.norm_file_to_client # pydevd_file_utils.norm_file_to_server # # instead", "# later on). # Note 2: it goes hand-in-hand with", "= f.getCanonicalPath() if IS_PY2 and not isinstance(ret, str): return ret.encode(getfilesystemencoding())", "paths_from_eclipse_to_python])) translated = translated_proper_case if eclipse_sep != python_sep: translated =", "fail if there's something not correct here -- but at", "the client is where eclipse is running. E.g.: If the", "separators are what we expect from the IDE. 
filename =", "are not absolute)\\n') sys.stderr.write('pydev debugger: The debugger may still function,", "to the server, we do the replace first and only", "if len(ret) > 1 and ret[1] == ':' and ret[0].islower():", "try: int(filename) return True except: return False def is_real_file(filename): #", "filename = convert_to_long_pathname(filename) filename = _os_normcase(filename) return filename.lower() else: def", "import os.path import sys import traceback _os_normcase = os.path.normcase basename", "os.path import sys import traceback _os_normcase = os.path.normcase basename =", "be set with an environment variable from the command line:", "= _original_file_to_server return # only setup translation functions if absolutely", "return f ret = get_abs_path_real_path_and_base_from_file(f) # Also cache based on", "calls) norm_file_to_client = _original_file_to_client norm_file_to_server = _original_file_to_server return # only", "if there's something not correct here -- but at least", "that: - The case of a file will match the", "os: 'UNIX' or 'WINDOWS' ''' global _ide_os prev = _ide_os", "a given scope. def _original_file_to_client(filename, cache={}): try: return cache[filename] except", "= GetLongPathName(filename, buf, MAX_PATH) if rv != 0 and rv", "out, leave no-op conversions in place. if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:", "quick test to see if we're working with a version", "functions may be rebound, users should always import # pydevd_file_utils", "GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW GetLongPathName.argtypes = [LPCWSTR, LPWSTR, DWORD] GetLongPathName.restype =", "def get_abs_path_real_path_and_base_from_frame(frame): try: return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] except: # This one is", "KeyError: cache[filename] = get_path_with_real_case(_AbsFile(filename)) return cache[filename] _original_file_to_server = _NormFile norm_file_to_client", "abs_path, real_path, base = f, f, f else: abs_path, real_path", "does not need any kind of client-server translation) f =", "in case # the path is different now. translated_proper_case =", "if zip_file_obj is None: return False elif zip_file_obj is _NOT_FOUND_SENTINEL:", "shutdown return f if f is not None: if f.endswith('.pyc'):", "paths that'll actually have breakpoints). ''' from _pydevd_bundle.pydevd_constants import IS_PY2,", "given file f returns tuple of its absolute path, real", "matching prefix for: %s in %s\\n' % \\ (translated, [x[0]", "of importing any of those names to a given scope.", "return ret def get_abs_path_real_path_and_base_from_frame(frame): try: return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] except: # This", "if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: replacing to client: %s\\n' % (translated,))", "if ind != -1: ind += 4 zip_path = r[:ind]", "would be 'NormFileFromEclipseToPython' try: return cache[filename] except KeyError: if eclipse_sep", "= translated return translated norm_file_to_server = _norm_file_to_server norm_file_to_client = _norm_file_to_client", "if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: sent to server: %s\\n' % (translated,))", "Now, let's do a quick test to see if we're", "more details. 
try: PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]')) except Exception: sys.stderr.write('Error", "to translate a path from the client to the debug", "DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: sent to client: %s\\n' % (translated,)) break", "def set_ide_os(os): ''' We need to set the IDE os", "goes hand-in-hand with 'exists'. inner_path = inner_path[1:] zip_path = zip_path", "_is_int(filename) and not filename.startswith(\"<ipython-input\") # For given file f returns", "# We must actually go on and check if we", "get right when one machine uses a case-independent filesystem and", "the drive letter properly (it'll be unchanged). # Make sure", "although I can replicate this by creating a file ending", "rPath = os.path.abspath # defined as a list of tuples", "return meth(mod_name) return None def get_package_dir(mod_name): for path in sys.path:", "if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: to client: unable to find matching", "= {} NORM_PATHS_AND_BASE_CONTAINER = {} def _NormFile(filename): abs_path, real_path =", "real_path def _NormPath(filename, normpath): r = normpath(filename) ind = r.find('.zip')", "{} norm_filename_to_client_container = {} initial_paths = list(paths) paths_from_eclipse_to_python = initial_paths[:]", "that the separators are what we expect from the IDE.", "if os == 'WIN': # Apparently PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116)", "= os # We need to (re)setup how the client", "rv = GetShortPathName(filename, buf, MAX_PATH) if rv != 0 and", "a path from the client to the debug server translated", "here, # only at the beginning of this method. cache[filename]", "translated to the python process # So, this would be", "the IDE. filename = filename.replace(python_sep, eclipse_sep) # used to translate", "we can be sure that: - The case of a", "a file ending as # .zip! or .egg!, I don't", "added by @jetbrains, but it should probably be reviewed #", "IS_PY3K: import pkgutil else: from _pydev_imps import _pydev_pkgutil_old as pkgutil", "to client: %s\\n' % (translated,)) break else: if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev", "list of tuples where the 1st element of the tuple", "return None if loader is not None: for attr in", "'WINDOWS' ''' global _ide_os prev = _ide_os if os ==", "that this can be tricky to get right when one", "be on the server accessible through the PYTHONPATH (and the", "if sys.platform == 'win32': try: import ctypes from ctypes.wintypes import", "is the same format as PATHS_FROM_ECLIPSE_TO_PYTHON''' global norm_file_to_client global norm_file_to_server", "initial_paths[:] # Apply normcase to the existing paths to follow", "to True to debug the result of those translations @note:", "and debugging in another. To do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant", "zip_file_obj = _ZIP_SEARCH_CACHE.get(zip_path, _NOT_FOUND_SENTINEL) if zip_file_obj is None: return False", "= translated.replace(python_sep, eclipse_sep) # The resulting path is not in", "(from: %s to %s)\\n' % ( translated_proper_case, translated)) for i,", "zip_file_obj.getinfo(inner_path.replace('\\\\', '/')) return join(zip_path, inner_path) except KeyError: return None return", "+ '!' zip_file_obj = _ZIP_SEARCH_CACHE.get(zip_path, _NOT_FOUND_SENTINEL) if zip_file_obj is None:", "to make path conversions when doing a remote debugging session", "if not exists(real_path): # We must actually go on and", "Make sure the drive letter is always uppercase. 
if len(ret)", "module provides utilities to get the absolute filenames so that", "ctypes.create_unicode_buffer(MAX_PATH) if IS_PY2 and isinstance(filename, str): filename = filename.decode(getfilesystemencoding()) rv", "_original_file_to_client norm_file_to_server = _original_file_to_server return # only setup translation functions", "path in sys.path: abs_path, real_path = initial_norm_paths(join(path, filename)) if exists(real_path):", "python that has no problems # related to the names", "{} initial_norm_paths = _NormPaths def _NormPaths(filename): # Let's redefine _NormPaths", "@note: the case of the paths is important! Note that", "= zip_file_obj except: _ZIP_SEARCH_CACHE[zip_path] = _NOT_FOUND_SENTINEL return False try: if", "IS_WINDOWS: if IS_JYTHON: def normcase(filename): return filename.lower() else: def normcase(filename):", "do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with the", "as requested during the debug session. NORM_PATHS_CONTAINER = {} NORM_PATHS_AND_BASE_CONTAINER", "must be on the server accessible through the PYTHONPATH (and", "not filename.startswith(\"<ipython-input\") # For given file f returns tuple of", "= _convert_to_short_pathname get_path_with_real_case = _get_path_with_real_case elif IS_JYTHON and IS_WINDOWS: def", "return False try: if inner_path.startswith('/') or inner_path.startswith('\\\\'): inner_path = inner_path[1:]", "shutdown return f ret = get_abs_path_real_path_and_base_from_file(f) # Also cache based", "'build\\\\bdist.')): # files from eggs in Python 2.7 have paths", "to client: %s\\n' % (translated,)) # Note: use the non-normalized", "have paths like build/bdist.linux-x86_64/egg/<path-inside-egg> f = frame.f_globals['__file__'] if get_abs_path_real_path_and_base_from_file is", "convert_to_long_pathname(filename) filename = _os_normcase(filename) return filename.lower() else: def normcase(filename): return", "_last_client_server_paths_set _last_client_server_paths_set = paths[:] # Work on the client and", "DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: to client: unable to find matching prefix", "else: if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list): sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment", "else: # Converting json lists to tuple PATHS_FROM_ECLIPSE_TO_PYTHON = [tuple(x)", "if path.endswith('/') or path.endswith('\\\\'): path = path[:-1] if sep !=", "IS_PY3K, DebugInfoHolder, IS_WINDOWS, IS_JYTHON from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding import json", "'_NormPath'. inner_path = inner_path[1:] zip_path = zip_path + '!' zip_file_obj", "ending as # .zip! or .egg!, I don't really know", "on). # Note 2: it goes hand-in-hand with '_NormPath'. inner_path", "tuple is the path in the client machine # and", "def _NormPath(filename, normpath): r = normpath(filename) ind = r.find('.zip') if", "DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: sent to server: %s\\n' % (translated,)) break", "to debug the result of those translations @note: the case", "!= str: raise AssertionError('Paths passed to _NormPaths must be str.", "normcase(filename): # `normcase` doesn't lower case on Python 2 for", "= _NormPaths def _NormPaths(filename): # Let's redefine _NormPaths to work", "zip_path = zip_path + '!' 
if inner_path.startswith('/') or inner_path.startswith('\\\\'): inner_path", "except KeyError: return None return None # Now, let's do", "sys.stderr.flush() NORM_SEARCH_CACHE = {} initial_norm_paths = _NormPaths def _NormPaths(filename): #", "and the client has: c:\\my_project\\src\\package\\module1.py the PATHS_FROM_ECLIPSE_TO_PYTHON would have to", "pkgutil try: loader = pkgutil.get_loader(mod_name) except: return None if loader", "= filename.decode(getfilesystemencoding()) rv = GetShortPathName(filename, buf, MAX_PATH) if rv !=", "is running may be actually different from the client (and", "os.path.abspath # defined as a list of tuples where the", "conversions in place. if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2: traceback.print_exc() else: convert_to_long_pathname", "# Note: as these functions may be rebound, users should", "this can be set with an environment variable from the", "creating a file ending as # .zip! or .egg!, I", "eclipse_prefix + translated_proper_case[len(python_prefix):] if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: sent to client:", "return None return None # Now, let's do a quick", "case-independent, 'normcase()' should be used on the paths defined in", "abs_path, real_path = _NormPaths(filename) return abs_path # Returns tuple of", "always uppercase. if len(ret) > 1 and ret[1] == ':'", "in the python process, so, we cannot do a _NormFile", "isdigit() doesn't support negative numbers try: int(filename) return True except:", "for path in sys.path: abs_path, real_path = initial_norm_paths(join(path, filename)) if", "> 2: traceback.print_exc() else: convert_to_long_pathname = _convert_to_long_pathname convert_to_short_pathname = _convert_to_short_pathname", "only setup translation functions if absolutely needed! def _norm_file_to_server(filename, cache=norm_filename_to_server_container):", "paths[:] # Work on the client and server slashes. python_sep", "_info = zip_file_obj.getinfo(inner_path.replace('\\\\', '/')) return join(zip_path, inner_path) except KeyError: return", "make a difference). NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] = ret return ret def get_fullname(mod_name):", "# This one is just internal (so, does not need", "] convert_to_long_pathname = lambda filename:filename convert_to_short_pathname = lambda filename:filename get_path_with_real_case", "debugger: Unable to find real location for: %s\\n' % (filename,))", "if ind != -1: ind += 4 zip_path = file[:ind]", "import zipfile zip_file_obj = zipfile.ZipFile(zip_path, 'r') _ZIP_SEARCH_CACHE[zip_path] = zip_file_obj except:", "zipfile.ZipFile(zip_path, 'r') _ZIP_SEARCH_CACHE[zip_path] = zip_file_obj except: _ZIP_SEARCH_CACHE[zip_path] = _NOT_FOUND_SENTINEL return", "be reviewed # later on). 
# Note 2: it goes", "environment variable from the command line: set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\\my_project\\src','/user/projects/my_project/src']] @note: DEBUG_CLIENT_SERVER_TRANSLATION", "sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.write('pydev debugger: CRITICAL WARNING: This version of python seems", "go on and check if we can find it as", "= getattr(loader, attr, None) if meth is not None: return", "_NormFile(filename) # After getting the real path, let's get it", "path0.encode(sys.getfilesystemencoding()) if isinstance(path1, unicode): path1 = path1.encode(sys.getfilesystemencoding()) path0 = _fix_path(path0,", "the target machine for the paths that'll actually have breakpoints).", "return filename def _get_path_with_real_case(filename): ret = convert_to_long_pathname(convert_to_short_pathname(filename)) # This doesn't", "ind += 4 zip_path = file[:ind] inner_path = file[ind:] if", "client: %s\\n' % (translated,)) break else: if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger:", "if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list): sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment variable", "DebugInfoHolder.DEBUG_TRACE_LEVEL > 2: traceback.print_exc() else: convert_to_long_pathname = _convert_to_long_pathname convert_to_short_pathname =", "elif IS_JYTHON and IS_WINDOWS: def get_path_with_real_case(filename): from java.io import File", "it was added by @jetbrains, but it should probably be", "r = join(normcase(zip_path), inner_path) return r r = normcase(r) return", "matching prefix for: %s in %s\\n' % \\ (translated, [x[1]", "reviewed # later on). # Note 2: it goes hand-in-hand", "loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment variable.\\n') traceback.print_exc() PATHS_FROM_ECLIPSE_TO_PYTHON = [] else:", "PATHS_FROM_ECLIPSE_TO_PYTHON = [] else: # Converting json lists to tuple", "return translated norm_file_to_server = _norm_file_to_server norm_file_to_client = _norm_file_to_client setup_client_server_paths(PATHS_FROM_ECLIPSE_TO_PYTHON) def", "return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] except: # This one is just internal (so,", "path.endswith('\\\\'): path = path[:-1] if sep != '/': path =", "sure that: - The case of a file will match", "the path in the client machine # and the 2nd", "the absolute filenames so that we can be sure that:", "= f, f, f else: abs_path, real_path = _NormPaths(f) base", "be hit). - Providing means for the user to make", "uses a case-independent filesystem and the other uses a case-dependent", "Jupyter cells return not _is_int(filename) and not filename.startswith(\"<ipython-input\") # For", "unchanged). # Make sure the drive letter is always uppercase.", "as if it was a relative path for some of", "= filename.encode(getfilesystemencoding()) return filename def _convert_to_short_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH) if", "ind = r.find('.egg') if ind != -1: ind += 4", "path in the server machine. # see module docstring for", "== 'WINDOWS' else '/' norm_filename_to_server_container = {} norm_filename_to_client_container = {}", "the server) @note: to enable remote debugging in the target", "{} initial_paths = list(paths) paths_from_eclipse_to_python = initial_paths[:] # Apply normcase", "result of those translations @note: the case of the paths", "filename: filename = convert_to_long_pathname(filename) filename = _os_normcase(filename) return filename.lower() else:", "machine. 
# see module docstring for more details. try: PATHS_FROM_ECLIPSE_TO_PYTHON", "= [] else: # Converting json lists to tuple PATHS_FROM_ECLIPSE_TO_PYTHON", "exists = os.path.exists join = os.path.join try: rPath = os.path.realpath", "('WINDOWS', 'UNIX') if prev != os: _ide_os = os #", "but it should probably be reviewed # later on). #", "format as PATHS_FROM_ECLIPSE_TO_PYTHON''' global norm_file_to_client global norm_file_to_server global _last_client_server_paths_set _last_client_server_paths_set", "GetShortPathName(filename, buf, MAX_PATH) if rv != 0 and rv <=", "translated.replace(eclipse_sep, python_sep) translated = _NormFile(translated) cache[filename] = translated return translated", "leave no-op conversions in place. if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2: traceback.print_exc()", "that when going to the server, we do the replace", "for path in sys.path: mod_path = join(path, mod_name.replace('.', '/')) if", "is the path in the client machine # and the", "filesystem and the other uses a case-dependent filesystem (if the", "(if we had it inside build/bdist it can make a", "(https://github.com/fabioz/PyDev.Debugger/issues/116) os = 'WINDOWS' assert os in ('WINDOWS', 'UNIX') if", "str): filename = filename.decode(getfilesystemencoding()) rv = GetShortPathName(filename, buf, MAX_PATH) if", "!= python_sep: translated = translated.replace(eclipse_sep, python_sep) translated = _NormFile(translated) cache[filename]", "inner_path.startswith('/') or inner_path.startswith('\\\\'): inner_path = inner_path[1:] _info = zip_file_obj.getinfo(inner_path.replace('\\\\', '/'))", "docstring for more details. try: PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]')) except", "# Apply normcase to the existing paths to follow the", "by @jetbrains, but it should probably be reviewed # later", "see parameter docs on pydevd.py @note: for doing a remote", "not is_real_file(f): abs_path, real_path, base = f, f, f else:", "eclipse_sep != python_sep: # Make sure that the separators are", "if isinstance(path0, unicode): path0 = path0.encode(sys.getfilesystemencoding()) if isinstance(path1, unicode): path1", "# instead of importing any of those names to a", "= _convert_to_long_pathname convert_to_short_pathname = _convert_to_short_pathname get_path_with_real_case = _get_path_with_real_case elif IS_JYTHON", "None: for attr in (\"get_filename\", \"_get_filename\"): meth = getattr(loader, attr,", "translated return translated def _norm_file_to_client(filename, cache=norm_filename_to_client_container): # The result of", "remote debugging session in one machine and debugging in another.", "% (translated,)) translated = translated.replace(eclipse_prefix, server_prefix) if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger:", "replicate this by creating a file ending as # .zip!", "eclipse_sep != python_sep: translated = translated.replace(python_sep, eclipse_sep) # The resulting", "the eclipse installation) import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend) see", "_pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding import json import os.path import sys import", "find it as if it was a relative path for", "for: %s in %s\\n' % \\ (translated, [x[0] for x", "if IS_WINDOWS: if translated.lower() != translated_proper_case.lower(): translated_proper_case = translated if", "replace first and only later do the norm file. if", "# the path is different now. 
translated_proper_case = get_path_with_real_case(translated) translated", "_norm_file_to_client(filename, cache=norm_filename_to_client_container): # The result of this method will be", "os because the host where the code is running may", "( translated_proper_case, translated)) for i, (eclipse_prefix, python_prefix) in enumerate(paths_from_eclipse_to_python): if", "be actually different from the client (and the point is", "and the other uses a case-dependent filesystem (if the system", "+ '!' if inner_path.startswith('/') or inner_path.startswith('\\\\'): inner_path = inner_path[1:] if", "DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: replacing to client: %s\\n' % (translated,)) #", "where your python process is running and the client is", "on pydevd.py @note: for doing a remote debugging session, all", "client has: c:\\my_project\\src\\package\\module1.py the PATHS_FROM_ECLIPSE_TO_PYTHON would have to be: PATHS_FROM_ECLIPSE_TO_PYTHON", "(pydev extensions in the eclipse installation) import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer,", "_convert_to_short_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH) if IS_PY2 and isinstance(filename, str): filename", "filename:filename if sys.platform == 'win32': try: import ctypes from ctypes.wintypes", "tuple PATHS_FROM_ECLIPSE_TO_PYTHON = [tuple(x) for x in PATHS_FROM_ECLIPSE_TO_PYTHON] # example:", "bug: http://bugs.python.org/issue1666807\\n') sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.flush() NORM_SEARCH_CACHE = {} initial_norm_paths = _NormPaths", "file f returns tuple of its absolute path, real path", "I don't really know what's the real-world case for this", "if inner_path.startswith('/') or inner_path.startswith('\\\\'): inner_path = inner_path[1:] if inner_path: r", "initial_paths = list(paths) paths_from_eclipse_to_python = initial_paths[:] # Apply normcase to", "join(zip_path, inner_path) except KeyError: return None return None # Now,", "%s\\n' % \\ (translated, [x[1] for x in paths_from_eclipse_to_python])) translated", "def _norm_file_to_server(filename, cache=norm_filename_to_server_container): # Eclipse will send the passed filename", "import File f = File(filename) ret = f.getCanonicalPath() if IS_PY2", "except KeyError: if filename.__class__ != str: raise AssertionError('Paths passed to", "would have to be: PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\\my_project\\src', r'/user/projects/my_project/src')] alternatively, this", "= GetShortPathName(filename, buf, MAX_PATH) if rv != 0 and rv", "must be filled with the appropriate paths. @note: in this", "So, this would be 'NormFileFromEclipseToPython' try: return cache[filename] except KeyError:", "!= os: _ide_os = os # We need to (re)setup", "(filename,)) abs_path = filename real_path = filename NORM_SEARCH_CACHE[filename] = abs_path,", "ind == -1: ind = file.find('.egg') if ind != -1:", "ret # Check that it actually works _get_path_with_real_case(__file__) except: #", "be unchanged). # Make sure the drive letter is always", "if meth is not None: return meth(mod_name) return None def", "return cache[filename] except KeyError: if eclipse_sep != python_sep: # Make", "'normcase()' should be used on the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON).", "probably be reviewed # later on). # Note 2: it", "the paths is important! 
Note that this can be tricky", "_fix_path(path, sep): if path.endswith('/') or path.endswith('\\\\'): path = path[:-1] if", "= file.find('.zip') if ind == -1: ind = file.find('.egg') if", "translation) f = frame.f_code.co_filename if f is not None and", "This version of python seems to be incorrectly compiled (internal", "to be: PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\\my_project\\src', r'/user/projects/my_project/src')] alternatively, this can be", "for: %s in %s\\n' % \\ (translated, [x[1] for x", "return filename def _convert_to_short_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH) if IS_PY2 and", "preferences. for i, (path0, path1) in enumerate(paths_from_eclipse_to_python[:]): if IS_PY2: if", "quite work out, leave no-op conversions in place. if DebugInfoHolder.DEBUG_TRACE_LEVEL", "eclipse_sep != python_sep: translated = translated.replace(eclipse_sep, python_sep) translated = _NormFile(translated)", "uppercase. if len(ret) > 1 and ret[1] == ':' and", "Don't fail if there's something not correct here -- but", "where the code is running may be actually different from", "os.path.exists(file): return file ind = file.find('.zip') if ind == -1:", "and ret[1] == ':' and ret[0].islower(): return ret[0].upper() + ret[1:]", "= frame.f_code.co_filename if f is not None and f.startswith (('build/bdist.',", "NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] except: # This one is just internal (so, does", "not support os.path.realpath # realpath is a no-op on systems", "place. if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2: traceback.print_exc() else: convert_to_long_pathname = _convert_to_long_pathname", "of the tuple is the path in the client machine", "is not in the python process, so, we cannot do", "here -- but at least print it to the user", "abs_path, real_path return abs_path, real_path except: # Don't fail if", "absolutely needed! def _norm_file_to_server(filename, cache=norm_filename_to_server_container): # Eclipse will send the", "code is running may be actually different from the client", "variable to be a list.\\n') PATHS_FROM_ECLIPSE_TO_PYTHON = [] else: #", "def _get_path_with_real_case(filename): ret = convert_to_long_pathname(convert_to_short_pathname(filename)) # This doesn't handle the", "r _ZIP_SEARCH_CACHE = {} _NOT_FOUND_SENTINEL = object() def exists(file): if", "translated)) for i, (eclipse_prefix, python_prefix) in enumerate(paths_from_eclipse_to_python): if translated.startswith(python_prefix): if", "f[:-1] elif f.endswith('$py.class'): f = f[:-len('$py.class')] + '.py' if not", "def is_real_file(filename): # Check for Jupyter cells return not _is_int(filename)", "server). :param os: 'UNIX' or 'WINDOWS' ''' global _ide_os prev", "miss breakpoints.\\n') sys.stderr.write('pydev debugger: Related bug: http://bugs.python.org/issue1666807\\n') sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.flush() NORM_SEARCH_CACHE", "filename def _NormPaths(filename): try: return NORM_PATHS_CONTAINER[filename] except KeyError: if filename.__class__", "installation) import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend) see parameter docs", "NORM_PATHS_AND_BASE_CONTAINER = {} def _NormFile(filename): abs_path, real_path = _NormPaths(filename) return", "need to set the IDE os because the host where", "zip_path = zip_path + '!' 
zip_file_obj = _ZIP_SEARCH_CACHE.get(zip_path, _NOT_FOUND_SENTINEL) if", "The result of this method will be passed to eclipse", "is None: # Interpreter shutdown return f ret = get_abs_path_real_path_and_base_from_file(f)", "target machine (pydev extensions in the eclipse installation) import pydevd;pydevd.settrace(host,", "get_path_with_real_case(translated) translated = _NormFile(translated_proper_case) if IS_WINDOWS: if translated.lower() != translated_proper_case.lower():", "KeyError: abs_path, real_path = initial_norm_paths(filename) if not exists(real_path): # We", "# Interpreter shutdown return f if f is not None:", "= zip_path + '!' zip_file_obj = _ZIP_SEARCH_CACHE.get(zip_path, _NOT_FOUND_SENTINEL) if zip_file_obj", "breakpoints). ''' from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K, DebugInfoHolder, IS_WINDOWS, IS_JYTHON", "= ret return ret def get_abs_path_real_path_and_base_from_frame(frame): try: return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] except:", "import getfilesystemencoding import json import os.path import sys import traceback", "try: return cache[filename] except KeyError: # used to translate a", "doing a remote debugging session, all the pydevd_ files must", "NORM_PATHS_CONTAINER = {} NORM_PATHS_AND_BASE_CONTAINER = {} def _NormFile(filename): abs_path, real_path", "environment variable to be a list.\\n') PATHS_FROM_ECLIPSE_TO_PYTHON = [] else:", "def _NormPaths(filename): try: return NORM_PATHS_CONTAINER[filename] except KeyError: if filename.__class__ !=", "except AttributeError: code = rPath.__code__ if not exists(_NormFile(code.co_filename)): sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.write('pydev", "negative numbers try: int(filename) return True except: return False def", "None # Now, let's do a quick test to see", "LPWSTR, DWORD] GetLongPathName.restype = DWORD GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW GetShortPathName.argtypes =", "PATHS_FROM_ECLIPSE_TO_PYTHON). @note: all the paths with breakpoints must be translated", "filename.startswith(\"<ipython-input\") # For given file f returns tuple of its", "in sys.path: abs_path, real_path = initial_norm_paths(join(path, filename)) if exists(real_path): break", "try: return cache[filename] except KeyError: cache[filename] = get_path_with_real_case(_AbsFile(filename)) return cache[filename]", "python process, so, we cannot do a _NormFile here, #", "may still function, but it will work slower and may", "os.path.join try: rPath = os.path.realpath # @UndefinedVariable except: # jython", "zipfile zip_file_obj = zipfile.ZipFile(zip_path, 'r') _ZIP_SEARCH_CACHE[zip_path] = zip_file_obj except: _ZIP_SEARCH_CACHE[zip_path]", "returns tuple of its absolute path, real path and base", "# the real case and then obtain a new normalized", "debugging in another. 
To do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant must", "IS_PY2 and isinstance(filename, str): filename = filename.decode(getfilesystemencoding()) rv = GetLongPathName(filename,", "traceback.print_exc() # Note: as these functions may be rebound, users", "= _fix_path(path1, python_sep) initial_paths[i] = (path0, path1) paths_from_eclipse_to_python[i] = (normcase(path0),", "= filename.replace(python_sep, eclipse_sep) # used to translate a path from", "abs_path, real_path, base NORM_PATHS_AND_BASE_CONTAINER[f] = ret return ret def get_abs_path_real_path_and_base_from_frame(frame):", "a relative path for some of the paths in the", "context, the server is where your python process is running", "real_path = initial_norm_paths(join(path, filename)) if exists(real_path): break else: sys.stderr.write('pydev debugger:", "= translated_proper_case if eclipse_sep != python_sep: translated = translated.replace(python_sep, eclipse_sep)", "translated return translated norm_file_to_server = _norm_file_to_server norm_file_to_client = _norm_file_to_client setup_client_server_paths(PATHS_FROM_ECLIPSE_TO_PYTHON)", "'NormFileFromPythonToEclipse' try: return cache[filename] except KeyError: # used to translate", "_ZIP_SEARCH_CACHE[zip_path] = zip_file_obj except: _ZIP_SEARCH_CACHE[zip_path] = _NOT_FOUND_SENTINEL return False try:", "is that we want the proper paths to translate from", "it will work slower and may miss breakpoints.\\n') sys.stderr.write('pydev debugger:", "filename = filename.decode(getfilesystemencoding()) rv = GetShortPathName(filename, buf, MAX_PATH) if rv", "server: %s\\n' % (translated,)) break else: if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger:", "Apparently PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116) os = 'WINDOWS' assert os", "be filled with the appropriate paths. @note: in this context,", "a _NormFile here, # only at the beginning of this", "the existing paths to follow the os preferences. for i,", "break else: if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: to client: unable to", "= initial_norm_paths(join(path, filename)) if exists(real_path): break else: sys.stderr.write('pydev debugger: Unable", "set with an environment variable from the command line: set", "norm_file_to_server = _original_file_to_server def _fix_path(path, sep): if path.endswith('/') or path.endswith('\\\\'):", "meth = getattr(loader, attr, None) if meth is not None:", "port, suspend) see parameter docs on pydevd.py @note: for doing", "x in paths_from_eclipse_to_python])) # Note that when going to the", "eclipse_prefix, server_prefix in paths_from_eclipse_to_python: if translated.startswith(eclipse_prefix): if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger:", "= eclipse_prefix + translated_proper_case[len(python_prefix):] if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: sent to", "machine # and the 2nd element is the path in", "+ translated_proper_case[len(python_prefix):] if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: sent to client: %s\\n'", "the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON). @note: all the paths with", "structure /user/projects/my_project/src/package/module1.py and the client has: c:\\my_project\\src\\package\\module1.py the PATHS_FROM_ECLIPSE_TO_PYTHON would", "needed! 
def _norm_file_to_server(filename, cache=norm_filename_to_server_container): # Eclipse will send the passed", "one is just internal (so, does not need any kind", "support rPath = os.path.abspath # defined as a list of", "where the 1st element of the tuple is the path", "python_sep) translated = _NormFile(translated) cache[filename] = translated return translated def", "not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list): sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment variable to", "'/': path = path.replace('/', sep) return path _last_client_server_paths_set = []", "join = os.path.join try: rPath = os.path.realpath # @UndefinedVariable except:", "% (filename, type(filename))) abs_path = _NormPath(filename, os.path.abspath) real_path = _NormPath(filename,", "import json import os.path import sys import traceback _os_normcase =", "_NOT_FOUND_SENTINEL: try: import zipfile zip_file_obj = zipfile.ZipFile(zip_path, 'r') _ZIP_SEARCH_CACHE[zip_path] =", "LPWSTR, DWORD] GetShortPathName.restype = DWORD def _convert_to_long_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH)", "problems # related to the names generated... try: try: code", "actually different from the client (and the point is that", "the pydevd_ files must be on the server accessible through", "WARNING: This version of python seems to be incorrectly compiled", "meth is not None: return meth(mod_name) return None def get_package_dir(mod_name):", "PATHS_FROM_ECLIPSE_TO_PYTHON''' global norm_file_to_client global norm_file_to_server global _last_client_server_paths_set _last_client_server_paths_set = paths[:]", "norm_file_to_client global norm_file_to_server global _last_client_server_paths_set _last_client_server_paths_set = paths[:] # Work", "= {} initial_paths = list(paths) paths_from_eclipse_to_python = initial_paths[:] # Apply", "exists(real_path): # We must actually go on and check if", "inner_path) return r r = normcase(r) return r _ZIP_SEARCH_CACHE =", "= join(path, mod_name.replace('.', '/')) if os.path.isdir(mod_path): return mod_path return None", "This one is just internal (so, does not need any", ".egg!, I don't really know what's the real-world case for", "= convert_to_long_pathname(filename) filename = _os_normcase(filename) return filename.lower() else: def normcase(filename):", "are what we expect from the IDE. filename = filename.replace(python_sep,", "session, all the pydevd_ files must be on the server", "function, but it will work slower and may miss breakpoints.\\n')", "''' We need to set the IDE os because the", "= normcase(r) return r _ZIP_SEARCH_CACHE = {} _NOT_FOUND_SENTINEL = object()", "uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116) os = 'WINDOWS' assert os in ('WINDOWS',", "fast access later NORM_PATHS_CONTAINER[filename] = abs_path, real_path return abs_path, real_path", "letter properly (it'll be unchanged). 
# Make sure the drive", "False def is_real_file(filename): # Check for Jupyter cells return not", "-- but at least print it to the user so", "to work with paths that may be incorrect try: return", "try: loader = pkgutil.get_loader(mod_name) except: return None if loader is", "= [(r'c:\\my_project\\src', r'/user/projects/my_project/src')] alternatively, this can be set with an", "None if loader is not None: for attr in (\"get_filename\",", "x in PATHS_FROM_ECLIPSE_TO_PYTHON] # example: # PATHS_FROM_ECLIPSE_TO_PYTHON = [ #", "of client-server translation) f = frame.f_code.co_filename if f is not", "= ctypes.windll.kernel32.GetShortPathNameW GetShortPathName.argtypes = [LPCWSTR, LPWSTR, DWORD] GetShortPathName.restype = DWORD", "the proper paths to translate from the client to the", "False elif zip_file_obj is _NOT_FOUND_SENTINEL: try: import zipfile zip_file_obj =", "real_path = _NormPaths(filename) return abs_path # Returns tuple of absolute", "DWORD] GetShortPathName.restype = DWORD def _convert_to_long_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH) if", "rPath) # cache it for fast access later NORM_PATHS_CONTAINER[filename] =", "Related bug: http://bugs.python.org/issue1666807\\n') sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.flush() NORM_SEARCH_CACHE = {} initial_norm_paths =", "filename.encode(getfilesystemencoding()) return filename def _get_path_with_real_case(filename): ret = convert_to_long_pathname(convert_to_short_pathname(filename)) # This", "return ret # Check that it actually works _get_path_with_real_case(__file__) except:", "= _NOT_FOUND_SENTINEL return False try: if inner_path.startswith('/') or inner_path.startswith('\\\\'): inner_path", "f.startswith (('build/bdist.', 'build\\\\bdist.')): # files from eggs in Python 2.7", "unicode): path0 = path0.encode(sys.getfilesystemencoding()) if isinstance(path1, unicode): path1 = path1.encode(sys.getfilesystemencoding())", "if we can find it as if it was a", "return abs_path # Returns tuple of absolute path and real", "+ ret[1:] return ret # Check that it actually works", "ret[1] == ':' and ret[0].islower(): return ret[0].upper() + ret[1:] return", "= _NormPaths(filename) return real_path def _AbsFile(filename): abs_path, real_path = _NormPaths(filename)", "try: code = rPath.func_code except AttributeError: code = rPath.__code__ if", "None: # Interpreter shutdown return f if f is not", "to the server). 
:param os: 'UNIX' or 'WINDOWS' ''' global", "# Apparently PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116) os = 'WINDOWS' assert", "used to translate a path from the debug server to", "passed to eclipse # So, this would be 'NormFileFromPythonToEclipse' try:", "python_sep: translated = translated.replace(eclipse_sep, python_sep) translated = _NormFile(translated) cache[filename] =", "lists to tuple PATHS_FROM_ECLIPSE_TO_PYTHON = [tuple(x) for x in PATHS_FROM_ECLIPSE_TO_PYTHON]", "(internal generated filenames are not absolute)\\n') sys.stderr.write('pydev debugger: The debugger", "== ':' and ret[0].islower(): return ret[0].upper() + ret[1:] return ret", "used to translate a path from the client to the", "@UndefinedVariable except: # jython does not support os.path.realpath # realpath", "IS_WINDOWS, IS_JYTHON from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding import json import os.path", "islink support rPath = os.path.abspath # defined as a list", "# and the 2nd element is the path in the", "if _ide_os == 'WINDOWS' else '/' norm_filename_to_server_container = {} norm_filename_to_client_container", "_fix_path(path0, eclipse_sep) path1 = _fix_path(path1, python_sep) initial_paths[i] = (path0, path1)", "_NormPaths def _NormPaths(filename): # Let's redefine _NormPaths to work with", "2: it goes hand-in-hand with '_NormPath'. inner_path = inner_path[1:] zip_path", "for x in paths_from_eclipse_to_python])) # Note that when going to", "eggs in Python 2.7 have paths like build/bdist.linux-x86_64/egg/<path-inside-egg> f =", "match the actual file in the filesystem (otherwise breakpoints won't", "path is not in the python process, so, we cannot", "_NormPaths(filename): # Let's redefine _NormPaths to work with paths that", "PATHS_FROM_ECLIPSE_TO_PYTHON = [ # (r'd:\\temp\\temp_workspace_2\\test_python\\src\\yyy\\yyy', # r'd:\\temp\\temp_workspace_2\\test_python\\src\\hhh\\xxx') # ] convert_to_long_pathname", "on the target machine for the paths that'll actually have", "%s in %s\\n' % \\ (translated, [x[0] for x in", "paths_from_eclipse_to_python: if translated.startswith(eclipse_prefix): if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: replacing to server:", "norm file. if eclipse_sep != python_sep: translated = translated.replace(eclipse_sep, python_sep)", "os.path.realpath # @UndefinedVariable except: # jython does not support os.path.realpath", "Note (fabioz): although I can replicate this by creating a", "the code is running may be actually different from the", "= [] def setup_client_server_paths(paths): '''paths is the same format as", "_last_client_server_paths_set = [] def setup_client_server_paths(paths): '''paths is the same format", "doesn't support negative numbers try: int(filename) return True except: return", "= path.replace('/', sep) return path _last_client_server_paths_set = [] def setup_client_server_paths(paths):", "or path.endswith('\\\\'): path = path[:-1] if sep != '/': path", "return real_path def _AbsFile(filename): abs_path, real_path = _NormPaths(filename) return abs_path", "ind == -1: ind = r.find('.egg') if ind != -1:", "PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with the appropriate paths. 
@note:", "= frame.f_globals['__file__'] if get_abs_path_real_path_and_base_from_file is None: # Interpreter shutdown return", "PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\\my_project\\src','/user/projects/my_project/src']] @note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug", "filename NORM_SEARCH_CACHE[filename] = abs_path, real_path return abs_path, real_path except: #", "-1: ind = file.find('.egg') if ind != -1: ind +=", "set the IDE os because the host where the code", "in ('1', 'true') # Caches filled as requested during the", "to _NormPaths must be str. Found: %s (%s)' % (filename,", "cache[filename] _original_file_to_server = _NormFile norm_file_to_client = _original_file_to_client norm_file_to_server = _original_file_to_server", "if exists(real_path): break else: sys.stderr.write('pydev debugger: Unable to find real", "client machine # and the 2nd element is the path", "norm_file_to_server global _last_client_server_paths_set _last_client_server_paths_set = paths[:] # Work on the", "r''' This module provides utilities to get the absolute filenames", "tricky to get right when one machine uses a case-independent", "CRITICAL WARNING: This version of python seems to be incorrectly", "= _fix_path(path0, eclipse_sep) path1 = _fix_path(path1, python_sep) initial_paths[i] = (path0,", "from the debug server to the client translated = _NormFile(filename)", "sys.stderr.write('Error loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment variable.\\n') traceback.print_exc() PATHS_FROM_ECLIPSE_TO_PYTHON = []", "list): sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment variable to be a", "debugger may still function, but it will work slower and", "translation step needed (just inline the calls) norm_file_to_client = _original_file_to_client", "be tricky to get right when one machine uses a", "AttributeError: code = rPath.__code__ if not exists(_NormFile(code.co_filename)): sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.write('pydev debugger:", "if filename.__class__ != str: raise AssertionError('Paths passed to _NormPaths must", "in sys.path: mod_path = join(path, mod_name.replace('.', '/')) if os.path.isdir(mod_path): return", "DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in ('1', 'true') # Caches filled", "IS_WINDOWS else 'UNIX' def set_ide_os(os): ''' We need to set", "NORM_SEARCH_CACHE[filename] = abs_path, real_path return abs_path, real_path except: # Don't", "had it inside build/bdist it can make a difference). NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]", "global norm_file_to_client global norm_file_to_server global _last_client_server_paths_set _last_client_server_paths_set = paths[:] #", "if '~' in filename: filename = convert_to_long_pathname(filename) filename = _os_normcase(filename)", "sys.stderr.write('pydev debugger: sent to server: %s\\n' % (translated,)) break else:", "those translations @note: the case of the paths is important!", "for more details. try: PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]')) except Exception:", "of those names to a given scope. 
def _original_file_to_client(filename, cache={}):", "so that we can correct that traceback.print_exc() # Note: as", "translated (otherwise they won't be found in the server) @note:", "else: def normcase(filename): # `normcase` doesn't lower case on Python", "the system being debugged is case-independent, 'normcase()' should be used", "if not exists(_NormFile(code.co_filename)): sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.write('pydev debugger: CRITICAL WARNING: This version", "def get_abs_path_real_path_and_base_from_file(f): try: return NORM_PATHS_AND_BASE_CONTAINER[f] except: if _NormPaths is None:", "the non-normalized version. eclipse_prefix = initial_paths[i][0] translated = eclipse_prefix +", "without islink support rPath = os.path.abspath # defined as a", "with the appropriate paths. @note: in this context, the server", "= 'WINDOWS' assert os in ('WINDOWS', 'UNIX') if prev !=", "for some of the paths in the pythonpath for path", "abs_path, real_path = _NormPaths(f) base = basename(real_path) ret = abs_path,", "filename = buf.value if IS_PY2: filename = filename.encode(getfilesystemencoding()) return filename", "we cannot do a _NormFile here, # only at the", "pydevd_file_utils.norm_file_to_server # # instead of importing any of those names", "# see module docstring for more details. try: PATHS_FROM_ECLIPSE_TO_PYTHON =", "the client <-> server translation works to provide proper separators.", "follow the os preferences. for i, (path0, path1) in enumerate(paths_from_eclipse_to_python[:]):", "_get_path_with_real_case(__file__) except: # Something didn't quite work out, leave no-op", "loader is not None: for attr in (\"get_filename\", \"_get_filename\"): meth", "this can be tricky to get right when one machine", "initial_paths[i] = (path0, path1) paths_from_eclipse_to_python[i] = (normcase(path0), normcase(path1)) if not", "cache based on the frame.f_code.co_filename (if we had it inside", "%s\\n' % (translated,)) # Note: use the non-normalized version. eclipse_prefix", "'UNIX') if prev != os: _ide_os = os # We", "KeyError: if eclipse_sep != python_sep: # Make sure that the", "= lambda filename:filename if sys.platform == 'win32': try: import ctypes", "PYTHONPATH (and the PATHS_FROM_ECLIPSE_TO_PYTHON only needs to be set on", "f[:-len('$py.class')] + '.py' if not is_real_file(f): abs_path, real_path, base =", "'\\\\' if IS_WINDOWS else '/' eclipse_sep = '\\\\' if _ide_os", "= lambda filename:filename get_path_with_real_case = lambda filename:filename if sys.platform ==", "the point is that we want the proper paths to", "pkgutil else: from _pydev_imps import _pydev_pkgutil_old as pkgutil try: loader", "f.endswith('.pyc'): f = f[:-1] elif f.endswith('$py.class'): f = f[:-len('$py.class')] +", "extensions in the eclipse installation) import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port,", "inner_path = inner_path[1:] if inner_path: r = join(normcase(zip_path), inner_path) return", "to the python process # So, this would be 'NormFileFromEclipseToPython'", "if get_abs_path_real_path_and_base_from_file is None: # Interpreter shutdown return f ret", "= ret return ret def get_fullname(mod_name): if IS_PY3K: import pkgutil", "debugging session in one machine and debugging in another. 
To", "filename = filename.encode(getfilesystemencoding()) return filename def _get_path_with_real_case(filename): ret = convert_to_long_pathname(convert_to_short_pathname(filename))", "of absolute path and real path for given filename def", "prefix for: %s in %s\\n' % \\ (translated, [x[1] for", "else: abs_path, real_path = _NormPaths(f) base = basename(real_path) ret =", "IS_WINDOWS: if translated.lower() != translated_proper_case.lower(): translated_proper_case = translated if DEBUG_CLIENT_SERVER_TRANSLATION:", "debugged is case-independent, 'normcase()' should be used on the paths", "filename = filename.encode(getfilesystemencoding()) return filename def _convert_to_short_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH)", "abs_path # Returns tuple of absolute path and real path", "abs_path, real_path def _NormPath(filename, normpath): r = normpath(filename) ind =", "lambda filename:filename if sys.platform == 'win32': try: import ctypes from", "one machine and debugging in another. To do that, the", "rPath = os.path.realpath # @UndefinedVariable except: # jython does not", "rPath.__code__ if not exists(_NormFile(code.co_filename)): sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.write('pydev debugger: CRITICAL WARNING: This", "we had it inside build/bdist it can make a difference).", "base name def get_abs_path_real_path_and_base_from_file(f): try: return NORM_PATHS_AND_BASE_CONTAINER[f] except: if _NormPaths", "breakpoints won't be hit). - Providing means for the user", "_norm_file_to_client setup_client_server_paths(PATHS_FROM_ECLIPSE_TO_PYTHON) def _is_int(filename): # isdigit() doesn't support negative numbers", "appropriate paths. @note: in this context, the server is where", "None: return meth(mod_name) return None def get_package_dir(mod_name): for path in", "= json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]')) except Exception: sys.stderr.write('Error loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment", "server to the client translated = _NormFile(filename) # After getting", "instead of importing any of those names to a given", "properly (it'll be unchanged). # Make sure the drive letter", "during the debug session. NORM_PATHS_CONTAINER = {} NORM_PATHS_AND_BASE_CONTAINER = {}", "return ret[0].upper() + ret[1:] return ret # Check that it", "Apply normcase to the existing paths to follow the os", "on the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON). @note: all the paths", "how the client <-> server translation works to provide proper", "-1: ind += 4 zip_path = file[:ind] inner_path = file[ind:]", "DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: to server: unable to find matching prefix", "it, so we should do it manually. if '~' in", "path = path[:-1] if sep != '/': path = path.replace('/',", "if translated.lower() != translated_proper_case.lower(): translated_proper_case = translated if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write(", "attr in (\"get_filename\", \"_get_filename\"): meth = getattr(loader, attr, None) if", "in (\"get_filename\", \"_get_filename\"): meth = getattr(loader, attr, None) if meth", "it manually. if '~' in filename: filename = convert_to_long_pathname(filename) filename", "importing any of those names to a given scope. def", "server translation works to provide proper separators. 
setup_client_server_paths(_last_client_server_paths_set) DEBUG_CLIENT_SERVER_TRANSLATION =", "has: c:\\my_project\\src\\package\\module1.py the PATHS_FROM_ECLIPSE_TO_PYTHON would have to be: PATHS_FROM_ECLIPSE_TO_PYTHON =", "= _NormFile norm_file_to_client = _original_file_to_client norm_file_to_server = _original_file_to_server def _fix_path(path,", "method will be passed to eclipse # So, this would", "try: return NORM_SEARCH_CACHE[filename] except KeyError: abs_path, real_path = initial_norm_paths(filename) if", "a case-dependent filesystem (if the system being debugged is case-independent,", "be incorrect try: return NORM_SEARCH_CACHE[filename] except KeyError: abs_path, real_path =", "_NormPath(filename, os.path.abspath) real_path = _NormPath(filename, rPath) # cache it for", "PATHS_FROM_ECLIPSE_TO_PYTHON would have to be: PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\\my_project\\src', r'/user/projects/my_project/src')] alternatively,", "set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\\my_project\\src','/user/projects/my_project/src']] @note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to", "with the path with # the real case and then", "result of this method will be passed to eclipse #", "'WINDOWS' if IS_WINDOWS else 'UNIX' def set_ide_os(os): ''' We need", "the 2nd element is the path in the server machine.", "process # So, this would be 'NormFileFromEclipseToPython' try: return cache[filename]", "rv <= MAX_PATH: filename = buf.value if IS_PY2: filename =", "difference). NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] = ret return ret def get_fullname(mod_name): if IS_PY3K:", "and check if we can find it as if it", "abs_path = filename real_path = filename NORM_SEARCH_CACHE[filename] = abs_path, real_path", "debug server to the client translated = _NormFile(filename) # After", "translated_proper_case[len(python_prefix):] if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: sent to client: %s\\n' %", "translated = _NormFile(translated_proper_case) if IS_WINDOWS: if translated.lower() != translated_proper_case.lower(): translated_proper_case", "debugging session, all the pydevd_ files must be on the", "so that we can be sure that: - The case", "DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write( 'pydev debugger: _NormFile changed path (from: %s to", "except KeyError: abs_path, real_path = initial_norm_paths(filename) if not exists(real_path): #", "can replicate this by creating a file ending as #", "convert_to_long_pathname = _convert_to_long_pathname convert_to_short_pathname = _convert_to_short_pathname get_path_with_real_case = _get_path_with_real_case elif", "os in ('WINDOWS', 'UNIX') if prev != os: _ide_os =", "in this context, the server is where your python process", "If the server (your python process) has the structure /user/projects/my_project/src/package/module1.py", "should probably be reviewed # later on). 
# Note 2:", "is not None: return meth(mod_name) return None def get_package_dir(mod_name): for", "to find matching prefix for: %s in %s\\n' % \\", "realpath is a no-op on systems without islink support rPath", "MAX_PATH, LPCWSTR, LPWSTR, DWORD GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW GetLongPathName.argtypes = [LPCWSTR,", "File(filename) ret = f.getCanonicalPath() if IS_PY2 and not isinstance(ret, str):", "do a _NormFile here, # only at the beginning of", "find matching prefix for: %s in %s\\n' % \\ (translated,", "but it will work slower and may miss breakpoints.\\n') sys.stderr.write('pydev", "We need to (re)setup how the client <-> server translation", "must be translated (otherwise they won't be found in the", "2 for non-English locale, but Java # side does it,", "is different now. translated_proper_case = get_path_with_real_case(translated) translated = _NormFile(translated_proper_case) if", "we want the proper paths to translate from the client", "lambda filename:filename get_path_with_real_case = lambda filename:filename if sys.platform == 'win32':", "(still kept as it was added by @jetbrains, but it", "it with the path with # the real case and", "cache=norm_filename_to_server_container): # Eclipse will send the passed filename to be", "% (translated,)) break else: if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: to client:", "None: if f.endswith('.pyc'): f = f[:-1] elif f.endswith('$py.class'): f =", "translated if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write( 'pydev debugger: _NormFile changed path (from:", "debugger: _NormFile changed path (from: %s to %s)\\n' % (", "filename.replace(python_sep, eclipse_sep) # used to translate a path from the", "sys.stderr.write('pydev debugger: The debugger may still function, but it will", "path1) paths_from_eclipse_to_python[i] = (normcase(path0), normcase(path1)) if not paths_from_eclipse_to_python: # no", "filled as requested during the debug session. 
NORM_PATHS_CONTAINER = {}", "python process # So, this would be 'NormFileFromEclipseToPython' try: return", "setup_client_server_paths(_last_client_server_paths_set) DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in ('1', 'true') # Caches", "return join(zip_path, inner_path) except KeyError: return None return None #", "if IS_WINDOWS else '/' eclipse_sep = '\\\\' if _ide_os ==", "= initial_norm_paths(filename) if not exists(real_path): # We must actually go", "norm_file_to_server = _norm_file_to_server norm_file_to_client = _norm_file_to_client setup_client_server_paths(PATHS_FROM_ECLIPSE_TO_PYTHON) def _is_int(filename): #", "can be tricky to get right when one machine uses", "get the absolute filenames so that we can be sure", "will send the passed filename to be translated to the", "of the paths in the pythonpath for path in sys.path:", "get_abs_path_real_path_and_base_from_file(f): try: return NORM_PATHS_AND_BASE_CONTAINER[f] except: if _NormPaths is None: #", "= {} def _NormFile(filename): abs_path, real_path = _NormPaths(filename) return real_path", "send the passed filename to be translated to the python", "for this # (still kept as it was added by", "to the client translated = _NormFile(filename) # After getting the", "other uses a case-dependent filesystem (if the system being debugged", "= rPath.func_code except AttributeError: code = rPath.__code__ if not exists(_NormFile(code.co_filename)):", "sys.stderr.write('pydev debugger: CRITICAL WARNING: This version of python seems to", "[] else: # Converting json lists to tuple PATHS_FROM_ECLIPSE_TO_PYTHON =", "should be used on the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON). @note:", "real_path return abs_path, real_path except: # Don't fail if there's", "path1 = path1.encode(sys.getfilesystemencoding()) path0 = _fix_path(path0, eclipse_sep) path1 = _fix_path(path1,", "filesystem (otherwise breakpoints won't be hit). - Providing means for", "user so that we can correct that traceback.print_exc() # Note:", "server: unable to find matching prefix for: %s in %s\\n'", "return abs_path, real_path def _NormPath(filename, normpath): r = normpath(filename) ind", "(eclipse_prefix, python_prefix) in enumerate(paths_from_eclipse_to_python): if translated.startswith(python_prefix): if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger:", "path _last_client_server_paths_set = [] def setup_client_server_paths(paths): '''paths is the same", "the paths in the pythonpath for path in sys.path: abs_path,", "= os.path.abspath # defined as a list of tuples where", "LPCWSTR, LPWSTR, DWORD GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW GetLongPathName.argtypes = [LPCWSTR, LPWSTR,", "build/bdist it can make a difference). 
NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] = ret return", "debugger: Related bug: http://bugs.python.org/issue1666807\\n') sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.flush() NORM_SEARCH_CACHE = {} initial_norm_paths", "set_ide_os(os): ''' We need to set the IDE os because", "= get_abs_path_real_path_and_base_from_file(f) # Also cache based on the frame.f_code.co_filename (if", "and real path for given filename def _NormPaths(filename): try: return", "it actually works _get_path_with_real_case(__file__) except: # Something didn't quite work", "_ide_os = 'WINDOWS' if IS_WINDOWS else 'UNIX' def set_ide_os(os): '''", "want the proper paths to translate from the client to", "We need to set the IDE os because the host", "_ide_os = os # We need to (re)setup how the", "= list(paths) paths_from_eclipse_to_python = initial_paths[:] # Apply normcase to the", "paths to translate from the client to the server). :param", "the server (your python process) has the structure /user/projects/my_project/src/package/module1.py and", "in the pythonpath for path in sys.path: abs_path, real_path =", "r'd:\\temp\\temp_workspace_2\\test_python\\src\\hhh\\xxx') # ] convert_to_long_pathname = lambda filename:filename convert_to_short_pathname = lambda", "that we want the proper paths to translate from the", "a file will match the actual file in the filesystem", "real_path, base NORM_PATHS_AND_BASE_CONTAINER[f] = ret return ret def get_abs_path_real_path_and_base_from_frame(frame): try:", "elif f.endswith('$py.class'): f = f[:-len('$py.class')] + '.py' if not is_real_file(f):", "we're working with a version of python that has no", "KeyError: return None return None # Now, let's do a", "if prev != os: _ide_os = os # We need", "os.path.normcase basename = os.path.basename exists = os.path.exists join = os.path.join", "# Make sure the drive letter is always uppercase. if", "= [ # (r'd:\\temp\\temp_workspace_2\\test_python\\src\\yyy\\yyy', # r'd:\\temp\\temp_workspace_2\\test_python\\src\\hhh\\xxx') # ] convert_to_long_pathname =", "return False elif zip_file_obj is _NOT_FOUND_SENTINEL: try: import zipfile zip_file_obj", "!= -1: ind += 4 zip_path = r[:ind] inner_path =", "return translated def _norm_file_to_client(filename, cache=norm_filename_to_client_container): # The result of this", "= (normcase(path0), normcase(path1)) if not paths_from_eclipse_to_python: # no translation step", "frame.f_globals['__file__'] if get_abs_path_real_path_and_base_from_file is None: # Interpreter shutdown return f", "we should do it manually. 
if '~' in filename: filename", "# pydevd_file_utils.norm_file_to_client # pydevd_file_utils.norm_file_to_server # # instead of importing any", "# # instead of importing any of those names to", "normpath): r = normpath(filename) ind = r.find('.zip') if ind ==", "server accessible through the PYTHONPATH (and the PATHS_FROM_ECLIPSE_TO_PYTHON only needs", "always import # pydevd_file_utils and then use: # # pydevd_file_utils.norm_file_to_client", "'WINDOWS' assert os in ('WINDOWS', 'UNIX') if prev != os:", "# PATHS_FROM_ECLIPSE_TO_PYTHON = [ # (r'd:\\temp\\temp_workspace_2\\test_python\\src\\yyy\\yyy', # r'd:\\temp\\temp_workspace_2\\test_python\\src\\hhh\\xxx') # ]", "except KeyError: cache[filename] = get_path_with_real_case(_AbsFile(filename)) return cache[filename] _original_file_to_server = _NormFile", "if translated.startswith(python_prefix): if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: replacing to client: %s\\n'", "on the client and server slashes. python_sep = '\\\\' if", "path1.encode(sys.getfilesystemencoding()) path0 = _fix_path(path0, eclipse_sep) path1 = _fix_path(path1, python_sep) initial_paths[i]", "For given file f returns tuple of its absolute path,", "MAX_PATH: filename = buf.value if IS_PY2: filename = filename.encode(getfilesystemencoding()) return", "type(filename))) abs_path = _NormPath(filename, os.path.abspath) real_path = _NormPath(filename, rPath) #", "seems to be incorrectly compiled (internal generated filenames are not", "is the path in the server machine. # see module", "return r r = normcase(r) return r _ZIP_SEARCH_CACHE = {}", "if rv != 0 and rv <= MAX_PATH: filename =", "GetLongPathName.restype = DWORD GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW GetShortPathName.argtypes = [LPCWSTR, LPWSTR,", "str. Found: %s (%s)' % (filename, type(filename))) abs_path = _NormPath(filename,", "manually. if '~' in filename: filename = convert_to_long_pathname(filename) filename =", "set to True to debug the result of those translations", "for Jupyter cells return not _is_int(filename) and not filename.startswith(\"<ipython-input\") #", "need to (re)setup how the client <-> server translation works", "@note: to enable remote debugging in the target machine (pydev", "the structure /user/projects/my_project/src/package/module1.py and the client has: c:\\my_project\\src\\package\\module1.py the PATHS_FROM_ECLIPSE_TO_PYTHON", "real path for given filename def _NormPaths(filename): try: return NORM_PATHS_CONTAINER[filename]", "!= python_sep: # Make sure that the separators are what", "_get_path_with_real_case(filename): ret = convert_to_long_pathname(convert_to_short_pathname(filename)) # This doesn't handle the drive", "to provide proper separators. setup_client_server_paths(_last_client_server_paths_set) DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in", "# cache it for fast access later NORM_PATHS_CONTAINER[filename] = abs_path,", "that'll actually have breakpoints). ''' from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K,", "return NORM_PATHS_AND_BASE_CONTAINER[f] except: if _NormPaths is None: # Interpreter shutdown", "loaded from environment variable to be a list.\\n') PATHS_FROM_ECLIPSE_TO_PYTHON =", "paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON). 
@note: all the paths with breakpoints", "= [tuple(x) for x in PATHS_FROM_ECLIPSE_TO_PYTHON] # example: # PATHS_FROM_ECLIPSE_TO_PYTHON", "uses a case-dependent filesystem (if the system being debugged is", "inner_path.startswith('\\\\'): inner_path = inner_path[1:] _info = zip_file_obj.getinfo(inner_path.replace('\\\\', '/')) return join(zip_path,", "location for: %s\\n' % (filename,)) abs_path = filename real_path =", "of the paths is important! Note that this can be", "be 'NormFileFromEclipseToPython' try: return cache[filename] except KeyError: if eclipse_sep !=", "inner_path = inner_path[1:] _info = zip_file_obj.getinfo(inner_path.replace('\\\\', '/')) return join(zip_path, inner_path)", "debugger: replacing to server: %s\\n' % (translated,)) translated = translated.replace(eclipse_prefix,", "import traceback _os_normcase = os.path.normcase basename = os.path.basename exists =", "_convert_to_long_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH) if IS_PY2 and isinstance(filename, str): filename", "now. translated_proper_case = get_path_with_real_case(translated) translated = _NormFile(translated_proper_case) if IS_WINDOWS: if", "if sep != '/': path = path.replace('/', sep) return path", "NORM_PATHS_AND_BASE_CONTAINER[f] = ret return ret def get_abs_path_real_path_and_base_from_frame(frame): try: return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]", "the actual file in the filesystem (otherwise breakpoints won't be", "path0 = _fix_path(path0, eclipse_sep) path1 = _fix_path(path1, python_sep) initial_paths[i] =", "case-independent filesystem and the other uses a case-dependent filesystem (if", "the server, we do the replace first and only later", "pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend) see parameter docs on pydevd.py", "it goes hand-in-hand with 'exists'. inner_path = inner_path[1:] zip_path =", "# no translation step needed (just inline the calls) norm_file_to_client", "ret.encode(getfilesystemencoding()) return ret if IS_WINDOWS: if IS_JYTHON: def normcase(filename): return", "IS_WINDOWS: def get_path_with_real_case(filename): from java.io import File f = File(filename)", "check if we can find it as if it was", "sys.stderr.write('pydev debugger: to server: unable to find matching prefix for:", "the client and server slashes. python_sep = '\\\\' if IS_WINDOWS", "initial_norm_paths = _NormPaths def _NormPaths(filename): # Let's redefine _NormPaths to", "unable to find matching prefix for: %s in %s\\n' %", "traceback.print_exc() else: convert_to_long_pathname = _convert_to_long_pathname convert_to_short_pathname = _convert_to_short_pathname get_path_with_real_case =", "not exists(_NormFile(code.co_filename)): sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.write('pydev debugger: CRITICAL WARNING: This version of", "session. 
NORM_PATHS_CONTAINER = {} NORM_PATHS_AND_BASE_CONTAINER = {} def _NormFile(filename): abs_path,", "set on the target machine for the paths that'll actually", "based on the frame.f_code.co_filename (if we had it inside build/bdist", "PATHS_FROM_ECLIPSE_TO_PYTHON] # example: # PATHS_FROM_ECLIPSE_TO_PYTHON = [ # (r'd:\\temp\\temp_workspace_2\\test_python\\src\\yyy\\yyy', #", "[] def setup_client_server_paths(paths): '''paths is the same format as PATHS_FROM_ECLIPSE_TO_PYTHON'''", "_NormPaths to work with paths that may be incorrect try:", "= [] else: if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list): sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded", "enable remote debugging in the target machine (pydev extensions in", "translated_proper_case.lower(): translated_proper_case = translated if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write( 'pydev debugger: _NormFile", "works to provide proper separators. setup_client_server_paths(_last_client_server_paths_set) DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower()", "'exists'. inner_path = inner_path[1:] zip_path = zip_path + '!' if", "it inside build/bdist it can make a difference). NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] =", "server machine. # see module docstring for more details. try:", "as # .zip! or .egg!, I don't really know what's", "the path is different now. translated_proper_case = get_path_with_real_case(translated) translated =", "0 and rv <= MAX_PATH: filename = buf.value if IS_PY2:", "is where eclipse is running. E.g.: If the server (your", "break else: sys.stderr.write('pydev debugger: Unable to find real location for:", "to translate from the client to the server). :param os:", "or 'WINDOWS' ''' global _ide_os prev = _ide_os if os", "from the IDE. filename = filename.replace(python_sep, eclipse_sep) # used to", "inner_path: r = join(normcase(zip_path), inner_path) return r r = normcase(r)", "to be translated to the python process # So, this", "(normcase(path0), normcase(path1)) if not paths_from_eclipse_to_python: # no translation step needed", "we expect from the IDE. filename = filename.replace(python_sep, eclipse_sep) #", "global _last_client_server_paths_set _last_client_server_paths_set = paths[:] # Work on the client", "if IS_PY2: filename = filename.encode(getfilesystemencoding()) return filename def _get_path_with_real_case(filename): ret", "return f if f is not None: if f.endswith('.pyc'): f", "= translated return translated def _norm_file_to_client(filename, cache=norm_filename_to_client_container): # The result", "this context, the server is where your python process is", "Note that when going to the server, we do the", "`normcase` doesn't lower case on Python 2 for non-English locale,", "normalized copy, just in case # the path is different", "_NormPaths must be str. Found: %s (%s)' % (filename, type(filename)))", "try: return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] except: # This one is just internal", "client is where eclipse is running. 
E.g.: If the server", "if inner_path: r = join(normcase(zip_path), inner_path) return r r =", "-1: ind += 4 zip_path = r[:ind] inner_path = r[ind:]", "inner_path[1:] _info = zip_file_obj.getinfo(inner_path.replace('\\\\', '/')) return join(zip_path, inner_path) except KeyError:", "see if we're working with a version of python that", "we can correct that traceback.print_exc() # Note: as these functions", "paths_from_eclipse_to_python = initial_paths[:] # Apply normcase to the existing paths", "real_path = _NormPaths(f) base = basename(real_path) ret = abs_path, real_path,", "inner_path.startswith('!'): # Note (fabioz): although I can replicate this by", "the client (and the point is that we want the", "on systems without islink support rPath = os.path.abspath # defined", "[x[1] for x in paths_from_eclipse_to_python])) translated = translated_proper_case if eclipse_sep", "ret def get_abs_path_real_path_and_base_from_frame(frame): try: return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] except: # This one", "from the client (and the point is that we want", "real_path = initial_norm_paths(filename) if not exists(real_path): # We must actually", "else: if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: to client: unable to find", "case and then obtain a new normalized copy, just in", "to eclipse # So, this would be 'NormFileFromPythonToEclipse' try: return", "Check that it actually works _get_path_with_real_case(__file__) except: # Something didn't", "there's something not correct here -- but at least print", "f.getCanonicalPath() if IS_PY2 and not isinstance(ret, str): return ret.encode(getfilesystemencoding()) return", "pydevd_ files must be on the server accessible through the", "= get_path_with_real_case(translated) translated = _NormFile(translated_proper_case) if IS_WINDOWS: if translated.lower() !=", "for fast access later NORM_PATHS_CONTAINER[filename] = abs_path, real_path return abs_path,", "ind != -1: ind += 4 zip_path = r[:ind] inner_path", "'WINDOWS' else '/' norm_filename_to_server_container = {} norm_filename_to_client_container = {} initial_paths", "needed (just inline the calls) norm_file_to_client = _original_file_to_client norm_file_to_server =", "names to a given scope. def _original_file_to_client(filename, cache={}): try: return", "the separators are what we expect from the IDE. 
filename", "os # We need to (re)setup how the client <->", "# defined as a list of tuples where the 1st", "step needed (just inline the calls) norm_file_to_client = _original_file_to_client norm_file_to_server", "# The resulting path is not in the python process,", "ind = file.find('.zip') if ind == -1: ind = file.find('.egg')", "\\ (translated, [x[0] for x in paths_from_eclipse_to_python])) # Note that", "_pydev_imps import _pydev_pkgutil_old as pkgutil try: loader = pkgutil.get_loader(mod_name) except:", "process is running and the client is where eclipse is", "on and check if we can find it as if", "path0 = path0.encode(sys.getfilesystemencoding()) if isinstance(path1, unicode): path1 = path1.encode(sys.getfilesystemencoding()) path0", "[] else: if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list): sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from", "from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding import json import os.path import sys", "case of a file will match the actual file in", "in enumerate(paths_from_eclipse_to_python[:]): if IS_PY2: if isinstance(path0, unicode): path0 = path0.encode(sys.getfilesystemencoding())", "def _NormPaths(filename): # Let's redefine _NormPaths to work with paths", "file ending as # .zip! or .egg!, I don't really", "if ind == -1: ind = file.find('.egg') if ind !=", "of its absolute path, real path and base name def", "client to the debug server translated = normcase(filename) for eclipse_prefix,", "do the replace first and only later do the norm", "if inner_path.startswith('/') or inner_path.startswith('\\\\'): inner_path = inner_path[1:] _info = zip_file_obj.getinfo(inner_path.replace('\\\\',", "the server is where your python process is running and", "filename def _get_path_with_real_case(filename): ret = convert_to_long_pathname(convert_to_short_pathname(filename)) # This doesn't handle", "stderrToServer, port, suspend) see parameter docs on pydevd.py @note: for", "so we should do it manually. if '~' in filename:", "%s to %s)\\n' % ( translated_proper_case, translated)) for i, (eclipse_prefix,", "_NormPaths is None: # Interpreter shutdown return f if f", "and not isinstance(ret, str): return ret.encode(getfilesystemencoding()) return ret if IS_WINDOWS:", "= f[:-1] elif f.endswith('$py.class'): f = f[:-len('$py.class')] + '.py' if", "by creating a file ending as # .zip! or .egg!,", "= inner_path[1:] zip_path = zip_path + '!' 
if inner_path.startswith('/') or", "generated filenames are not absolute)\\n') sys.stderr.write('pydev debugger: The debugger may", "debugger: sent to client: %s\\n' % (translated,)) break else: if", "= abs_path, real_path return abs_path, real_path except: # Don't fail", "PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116) os = 'WINDOWS' assert os in", "IS_JYTHON: def normcase(filename): return filename.lower() else: def normcase(filename): # `normcase`", "of those translations @note: the case of the paths is", "4 zip_path = r[:ind] inner_path = r[ind:] if inner_path.startswith('!'): #", "it as if it was a relative path for some", "may be rebound, users should always import # pydevd_file_utils and", "os = 'WINDOWS' assert os in ('WINDOWS', 'UNIX') if prev", "Eclipse will send the passed filename to be translated to", "return True except: return False def is_real_file(filename): # Check for", "when doing a remote debugging session in one machine and", "may be incorrect try: return NORM_SEARCH_CACHE[filename] except KeyError: abs_path, real_path", "# `normcase` doesn't lower case on Python 2 for non-English", "can make a difference). NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] = ret return ret def", "if os.path.exists(file): return file ind = file.find('.zip') if ind ==", "return None # Now, let's do a quick test to", "sys.stderr.write('pydev debugger: Related bug: http://bugs.python.org/issue1666807\\n') sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.flush() NORM_SEARCH_CACHE = {}", "that traceback.print_exc() # Note: as these functions may be rebound,", "mod_path = join(path, mod_name.replace('.', '/')) if os.path.isdir(mod_path): return mod_path return", "drive letter properly (it'll be unchanged). # Make sure the", "no problems # related to the names generated... try: try:", "= file[:ind] inner_path = file[ind:] if inner_path.startswith(\"!\"): # Note (fabioz):", "proper separators. setup_client_server_paths(_last_client_server_paths_set) DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in ('1', 'true')", "existing paths to follow the os preferences. for i, (path0,", "the filesystem (otherwise breakpoints won't be hit). 
- Providing means", "from the client to the debug server translated = normcase(filename)", "absolute filenames so that we can be sure that: -", "normcase(filename): return filename.lower() else: def normcase(filename): # `normcase` doesn't lower", "# jython does not support os.path.realpath # realpath is a", "So, this would be 'NormFileFromPythonToEclipse' try: return cache[filename] except KeyError:", "(translated, [x[0] for x in paths_from_eclipse_to_python])) # Note that when", "# (still kept as it was added by @jetbrains, but", "or inner_path.startswith('\\\\'): inner_path = inner_path[1:] _info = zip_file_obj.getinfo(inner_path.replace('\\\\', '/')) return", "= convert_to_long_pathname(convert_to_short_pathname(filename)) # This doesn't handle the drive letter properly", "compiled (internal generated filenames are not absolute)\\n') sys.stderr.write('pydev debugger: The", "= r[ind:] if inner_path.startswith('!'): # Note (fabioz): although I can", "ret = convert_to_long_pathname(convert_to_short_pathname(filename)) # This doesn't handle the drive letter", "Make sure that the separators are what we expect from", "base = basename(real_path) ret = abs_path, real_path, base NORM_PATHS_AND_BASE_CONTAINER[f] =", "the frame.f_code.co_filename (if we had it inside build/bdist it can", "server translated = normcase(filename) for eclipse_prefix, server_prefix in paths_from_eclipse_to_python: if", "[ # (r'd:\\temp\\temp_workspace_2\\test_python\\src\\yyy\\yyy', # r'd:\\temp\\temp_workspace_2\\test_python\\src\\hhh\\xxx') # ] convert_to_long_pathname = lambda", "tuple of its absolute path, real path and base name", "if IS_PY2: if isinstance(path0, unicode): path0 = path0.encode(sys.getfilesystemencoding()) if isinstance(path1,", "this # (still kept as it was added by @jetbrains,", "later on). # Note 2: it goes hand-in-hand with '_NormPath'.", "the PATHS_FROM_ECLIPSE_TO_PYTHON only needs to be set on the target", "didn't quite work out, leave no-op conversions in place. if", "ret[0].islower(): return ret[0].upper() + ret[1:] return ret # Check that", "ret return ret def get_abs_path_real_path_and_base_from_frame(frame): try: return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] except: #", "passed filename to be translated to the python process #", "> 1 and ret[1] == ':' and ret[0].islower(): return ret[0].upper()", "ind != -1: ind += 4 zip_path = file[:ind] inner_path", "that may be incorrect try: return NORM_SEARCH_CACHE[filename] except KeyError: abs_path,", "copy, just in case # the path is different now.", "something not correct here -- but at least print it", "return ret if IS_WINDOWS: if IS_JYTHON: def normcase(filename): return filename.lower()", "still function, but it will work slower and may miss", "the beginning of this method. cache[filename] = translated return translated", "abs_path, real_path = _NormPaths(filename) return real_path def _AbsFile(filename): abs_path, real_path", "debug session. 
NORM_PATHS_CONTAINER = {} NORM_PATHS_AND_BASE_CONTAINER = {} def _NormFile(filename):", "(path0, path1) in enumerate(paths_from_eclipse_to_python[:]): if IS_PY2: if isinstance(path0, unicode): path0", "should always import # pydevd_file_utils and then use: # #", "no-op _ide_os = 'WINDOWS' if IS_WINDOWS else 'UNIX' def set_ide_os(os):", "= filename real_path = filename NORM_SEARCH_CACHE[filename] = abs_path, real_path return", "path (from: %s to %s)\\n' % ( translated_proper_case, translated)) for", "normcase(filename): return filename # no-op _ide_os = 'WINDOWS' if IS_WINDOWS", "'False').lower() in ('1', 'true') # Caches filled as requested during", "'\\\\' if _ide_os == 'WINDOWS' else '/' norm_filename_to_server_container = {}", "zip_path = file[:ind] inner_path = file[ind:] if inner_path.startswith(\"!\"): # Note", "alternatively, this can be set with an environment variable from", "and server slashes. python_sep = '\\\\' if IS_WINDOWS else '/'", "filename:filename get_path_with_real_case = lambda filename:filename if sys.platform == 'win32': try:", "the debug session. NORM_PATHS_CONTAINER = {} NORM_PATHS_AND_BASE_CONTAINER = {} def", "# pydevd_file_utils and then use: # # pydevd_file_utils.norm_file_to_client # pydevd_file_utils.norm_file_to_server", "join(normcase(zip_path), inner_path) return r r = normcase(r) return r _ZIP_SEARCH_CACHE", "not _is_int(filename) and not filename.startswith(\"<ipython-input\") # For given file f", "be: PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\\my_project\\src', r'/user/projects/my_project/src')] alternatively, this can be set", "2nd element is the path in the server machine. #", "_os_normcase(filename) return filename.lower() else: def normcase(filename): return filename # no-op", "hand-in-hand with 'exists'. inner_path = inner_path[1:] zip_path = zip_path +", "filename to be translated to the python process # So,", "debugger: The debugger may still function, but it will work", "be incorrectly compiled (internal generated filenames are not absolute)\\n') sys.stderr.write('pydev", "f, f else: abs_path, real_path = _NormPaths(f) base = basename(real_path)", "= r.find('.egg') if ind != -1: ind += 4 zip_path", "what's the real-world case for this # (still kept as", "This doesn't handle the drive letter properly (it'll be unchanged).", "# isdigit() doesn't support negative numbers try: int(filename) return True", "cache[filename] = get_path_with_real_case(_AbsFile(filename)) return cache[filename] _original_file_to_server = _NormFile norm_file_to_client =", "filename.decode(getfilesystemencoding()) rv = GetLongPathName(filename, buf, MAX_PATH) if rv != 0", "if eclipse_sep != python_sep: translated = translated.replace(python_sep, eclipse_sep) # The", "@note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug the", "paths that may be incorrect try: return NORM_SEARCH_CACHE[filename] except KeyError:", "= 'WINDOWS' if IS_WINDOWS else 'UNIX' def set_ide_os(os): ''' We", "isinstance(path1, unicode): path1 = path1.encode(sys.getfilesystemencoding()) path0 = _fix_path(path0, eclipse_sep) path1", "enumerate(paths_from_eclipse_to_python): if translated.startswith(python_prefix): if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: replacing to client:", "session in one machine and debugging in another. 
To do", "get_fullname(mod_name): if IS_PY3K: import pkgutil else: from _pydev_imps import _pydev_pkgutil_old", "loader = pkgutil.get_loader(mod_name) except: return None if loader is not", "actual file in the filesystem (otherwise breakpoints won't be hit).", "to server: %s\\n' % (translated,)) break else: if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev", "path in sys.path: mod_path = join(path, mod_name.replace('.', '/')) if os.path.isdir(mod_path):", "does not support os.path.realpath # realpath is a no-op on", "be used on the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON). @note: all", "with '_NormPath'. inner_path = inner_path[1:] zip_path = zip_path + '!'", "code = rPath.__code__ if not exists(_NormFile(code.co_filename)): sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.write('pydev debugger: CRITICAL", "translated def _norm_file_to_client(filename, cache=norm_filename_to_client_container): # The result of this method", "command line: set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\\my_project\\src','/user/projects/my_project/src']] @note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to", "version. eclipse_prefix = initial_paths[i][0] translated = eclipse_prefix + translated_proper_case[len(python_prefix):] if", "inner_path.startswith('/') or inner_path.startswith('\\\\'): inner_path = inner_path[1:] if inner_path: r =", "machine (pydev extensions in the eclipse installation) import pydevd;pydevd.settrace(host, stdoutToServer,", "path1 = _fix_path(path1, python_sep) initial_paths[i] = (path0, path1) paths_from_eclipse_to_python[i] =", "version of python that has no problems # related to", "process, so, we cannot do a _NormFile here, # only", "ret = f.getCanonicalPath() if IS_PY2 and not isinstance(ret, str): return", "_original_file_to_server def _fix_path(path, sep): if path.endswith('/') or path.endswith('\\\\'): path =", "# Check for Jupyter cells return not _is_int(filename) and not", "file in the filesystem (otherwise breakpoints won't be hit). -", "the real-world case for this # (still kept as it", "docs on pydevd.py @note: for doing a remote debugging session,", "client: %s\\n' % (translated,)) # Note: use the non-normalized version.", "# only at the beginning of this method. cache[filename] =", "translated_proper_case if eclipse_sep != python_sep: translated = translated.replace(python_sep, eclipse_sep) #", "files must be on the server accessible through the PYTHONPATH", "slashes. python_sep = '\\\\' if IS_WINDOWS else '/' eclipse_sep =", "return not _is_int(filename) and not filename.startswith(\"<ipython-input\") # For given file", "but Java # side does it, so we should do", "import pkgutil else: from _pydev_imps import _pydev_pkgutil_old as pkgutil try:", "important! Note that this can be tricky to get right", "get_path_with_real_case(filename): from java.io import File f = File(filename) ret =", "Note: use the non-normalized version. eclipse_prefix = initial_paths[i][0] translated =", "Also cache based on the frame.f_code.co_filename (if we had it", "tuples where the 1st element of the tuple is the", "cache it for fast access later NORM_PATHS_CONTAINER[filename] = abs_path, real_path", "NORM_PATHS_CONTAINER[filename] except KeyError: if filename.__class__ != str: raise AssertionError('Paths passed", "def get_path_with_real_case(filename): from java.io import File f = File(filename) ret", "only later do the norm file. 
if eclipse_sep != python_sep:", "given filename def _NormPaths(filename): try: return NORM_PATHS_CONTAINER[filename] except KeyError: if", "is not None: if f.endswith('.pyc'): f = f[:-1] elif f.endswith('$py.class'):", "any kind of client-server translation) f = frame.f_code.co_filename if f", "[LPCWSTR, LPWSTR, DWORD] GetLongPathName.restype = DWORD GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW GetShortPathName.argtypes", "path[:-1] if sep != '/': path = path.replace('/', sep) return", "debugger: to client: unable to find matching prefix for: %s", "the case of the paths is important! Note that this", "the IDE os because the host where the code is", "= _NormPath(filename, rPath) # cache it for fast access later", "is _NOT_FOUND_SENTINEL: try: import zipfile zip_file_obj = zipfile.ZipFile(zip_path, 'r') _ZIP_SEARCH_CACHE[zip_path]", "Note 2: it goes hand-in-hand with 'exists'. inner_path = inner_path[1:]", "a remote debugging session in one machine and debugging in", "'UNIX' def set_ide_os(os): ''' We need to set the IDE", "os.path.realpath # realpath is a no-op on systems without islink", "the python process, so, we cannot do a _NormFile here,", "that we can be sure that: - The case of", "None) if meth is not None: return meth(mod_name) return None", "doesn't lower case on Python 2 for non-English locale, but", "# After getting the real path, let's get it with", "except: # Don't fail if there's something not correct here", "the path in the server machine. # see module docstring", "path is different now. translated_proper_case = get_path_with_real_case(translated) translated = _NormFile(translated_proper_case)", "absolute path, real path and base name def get_abs_path_real_path_and_base_from_file(f): try:", "prefix for: %s in %s\\n' % \\ (translated, [x[0] for", "right when one machine uses a case-independent filesystem and the", "1 and ret[1] == ':' and ret[0].islower(): return ret[0].upper() +", "!= translated_proper_case.lower(): translated_proper_case = translated if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write( 'pydev debugger:", "later on). # Note 2: it goes hand-in-hand with 'exists'.", "for the user to make path conversions when doing a", "= _NormPath(filename, os.path.abspath) real_path = _NormPath(filename, rPath) # cache it", "for attr in (\"get_filename\", \"_get_filename\"): meth = getattr(loader, attr, None)", "\\ (translated, [x[1] for x in paths_from_eclipse_to_python])) translated = translated_proper_case", "= normpath(filename) ind = r.find('.zip') if ind == -1: ind", "_ide_os == 'WINDOWS' else '/' norm_filename_to_server_container = {} norm_filename_to_client_container =", "_NOT_FOUND_SENTINEL = object() def exists(file): if os.path.exists(file): return file ind", "is case-independent, 'normcase()' should be used on the paths defined", "paths. @note: in this context, the server is where your", "python_sep) initial_paths[i] = (path0, path1) paths_from_eclipse_to_python[i] = (normcase(path0), normcase(path1)) if", "and isinstance(filename, str): filename = filename.decode(getfilesystemencoding()) rv = GetShortPathName(filename, buf,", "in enumerate(paths_from_eclipse_to_python): if translated.startswith(python_prefix): if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: replacing to", "passed to _NormPaths must be str. 
Found: %s (%s)' %", "= '\\\\' if IS_WINDOWS else '/' eclipse_sep = '\\\\' if", "the paths with breakpoints must be translated (otherwise they won't", "-1: ind = r.find('.egg') if ind != -1: ind +=", "if eclipse_sep != python_sep: translated = translated.replace(eclipse_sep, python_sep) translated =", "as a list of tuples where the 1st element of", "To do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with", "work slower and may miss breakpoints.\\n') sys.stderr.write('pydev debugger: Related bug:", "# only setup translation functions if absolutely needed! def _norm_file_to_server(filename,", "in Python 2.7 have paths like build/bdist.linux-x86_64/egg/<path-inside-egg> f = frame.f_globals['__file__']", "IS_JYTHON and IS_WINDOWS: def get_path_with_real_case(filename): from java.io import File f", "would be 'NormFileFromPythonToEclipse' try: return cache[filename] except KeyError: # used", "and f.startswith (('build/bdist.', 'build\\\\bdist.')): # files from eggs in Python", "provide proper separators. setup_client_server_paths(_last_client_server_paths_set) DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in ('1',", "def _convert_to_long_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH) if IS_PY2 and isinstance(filename, str):", "sys.stderr.write('pydev debugger: sent to client: %s\\n' % (translated,)) break else:", "len(ret) > 1 and ret[1] == ':' and ret[0].islower(): return", "_os_normcase = os.path.normcase basename = os.path.basename exists = os.path.exists join", "from the command line: set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\\my_project\\src','/user/projects/my_project/src']] @note: DEBUG_CLIENT_SERVER_TRANSLATION can be", "else: convert_to_long_pathname = _convert_to_long_pathname convert_to_short_pathname = _convert_to_short_pathname get_path_with_real_case = _get_path_with_real_case", "incorrect try: return NORM_SEARCH_CACHE[filename] except KeyError: abs_path, real_path = initial_norm_paths(filename)", "cache[filename] = translated return translated norm_file_to_server = _norm_file_to_server norm_file_to_client =", "functions if absolutely needed! def _norm_file_to_server(filename, cache=norm_filename_to_server_container): # Eclipse will", "f, f, f else: abs_path, real_path = _NormPaths(f) base =", "separators. 
setup_client_server_paths(_last_client_server_paths_set) DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in ('1', 'true') #", "name def get_abs_path_real_path_and_base_from_file(f): try: return NORM_PATHS_AND_BASE_CONTAINER[f] except: if _NormPaths is", "= basename(real_path) ret = abs_path, real_path, base NORM_PATHS_AND_BASE_CONTAINER[f] = ret", "getting the real path, let's get it with the path", "translated.startswith(python_prefix): if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev debugger: replacing to client: %s\\n' %", "need any kind of client-server translation) f = frame.f_code.co_filename if", "except: return False def is_real_file(filename): # Check for Jupyter cells", "filename:filename convert_to_short_pathname = lambda filename:filename get_path_with_real_case = lambda filename:filename if", "= zipfile.ZipFile(zip_path, 'r') _ZIP_SEARCH_CACHE[zip_path] = zip_file_obj except: _ZIP_SEARCH_CACHE[zip_path] = _NOT_FOUND_SENTINEL", "norm_file_to_client = _original_file_to_client norm_file_to_server = _original_file_to_server return # only setup", "except: # jython does not support os.path.realpath # realpath is", "doesn't handle the drive letter properly (it'll be unchanged). #", "4 zip_path = file[:ind] inner_path = file[ind:] if inner_path.startswith(\"!\"): #", "really know what's the real-world case for this # (still", "debugger: sent to server: %s\\n' % (translated,)) break else: if", "pydevd_file_utils and then use: # # pydevd_file_utils.norm_file_to_client # pydevd_file_utils.norm_file_to_server #", "zip_path = r[:ind] inner_path = r[ind:] if inner_path.startswith('!'): # Note", "DebugInfoHolder, IS_WINDOWS, IS_JYTHON from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding import json import", "sys import traceback _os_normcase = os.path.normcase basename = os.path.basename exists", "1st element of the tuple is the path in the", "client and server slashes. python_sep = '\\\\' if IS_WINDOWS else", "= get_path_with_real_case(_AbsFile(filename)) return cache[filename] _original_file_to_server = _NormFile norm_file_to_client = _original_file_to_client", "eclipse # So, this would be 'NormFileFromPythonToEclipse' try: return cache[filename]", "zip_file_obj except: _ZIP_SEARCH_CACHE[zip_path] = _NOT_FOUND_SENTINEL return False try: if inner_path.startswith('/')", "to the user so that we can correct that traceback.print_exc()", "return cache[filename] except KeyError: # used to translate a path", "internal (so, does not need any kind of client-server translation)", "convert_to_short_pathname = _convert_to_short_pathname get_path_with_real_case = _get_path_with_real_case elif IS_JYTHON and IS_WINDOWS:", "don't really know what's the real-world case for this #", "abs_path, real_path return abs_path, real_path def _NormPath(filename, normpath): r =", "translation works to provide proper separators. 
r'''This module provides utilities to get the absolute filenames so that we can be sure that:
    - The case of a file will match the actual file in the filesystem (otherwise breakpoints won't be hit).
    - Providing means for the user to make path conversions when doing a remote debugging session in
      one machine and debugging in another.

To do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with the appropriate paths.

@note:
    in this context, the server is where your python process is running
    and the client is where eclipse is running.

    E.g.:
        If the server (your python process) has the structure
            /user/projects/my_project/src/package/module1.py
        and the client has:
            c:\my_project\src\package\module1.py
        the PATHS_FROM_ECLIPSE_TO_PYTHON would have to be:
            PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\my_project\src', r'/user/projects/my_project/src')]

        alternatively, this can be set with an environment variable from the command line:
            set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\my_project\src','/user/projects/my_project/src']]

@note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug the result of those translations

@note: the case of the paths is important! Note that this can be tricky to get right when one machine
    uses a case-independent filesystem and the other uses a case-dependent filesystem (if the system
    being debugged is case-independent, 'normcase()' should be used on the paths defined in
    PATHS_FROM_ECLIPSE_TO_PYTHON).

@note: all the paths with breakpoints must be translated (otherwise they won't be found in the server).

@note: to enable remote debugging in the target machine (pydev extensions in the eclipse installation):
    import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend)

    see parameter docs on pydevd.py

@note: for doing a remote debugging session, all the pydevd_ files must be on the server accessible
    through the PYTHONPATH (and the PATHS_FROM_ECLIPSE_TO_PYTHON only needs to be set on the target
    machine for the paths that'll actually have breakpoints).
'''
from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K, DebugInfoHolder, IS_WINDOWS, IS_JYTHON
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
import json
import os.path
import sys
import traceback

_os_normcase = os.path.normcase
basename = os.path.basename
exists = os.path.exists
join = os.path.join

try:
    rPath = os.path.realpath  # @UndefinedVariable
except:
    # jython does not support os.path.realpath
    # realpath is a no-op on systems without islink support
    rPath = os.path.abspath

# Defined as a list of tuples where the 1st element of the tuple is the path in the client machine
# and the 2nd element is the path in the server machine.
# See module docstring for more details.
try:
    PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]'))
except Exception:
    sys.stderr.write('Error loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment variable.\n')
    traceback.print_exc()
    PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
    if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list):
        sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment variable to be a list.\n')
        PATHS_FROM_ECLIPSE_TO_PYTHON = []
    else:
        # Converting json lists to tuple
        PATHS_FROM_ECLIPSE_TO_PYTHON = [tuple(x) for x in PATHS_FROM_ECLIPSE_TO_PYTHON]

# example:
# PATHS_FROM_ECLIPSE_TO_PYTHON = [
#     (r'd:\temp\temp_workspace_2\test_python\src\yyy\yyy',
#      r'd:\temp\temp_workspace_2\test_python\src\hhh\xxx')
# ]

convert_to_long_pathname = lambda filename: filename
convert_to_short_pathname = lambda filename: filename
get_path_with_real_case = lambda filename: filename

if sys.platform == 'win32':
    try:
        import ctypes
        from ctypes.wintypes import MAX_PATH, LPCWSTR, LPWSTR, DWORD

        GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
        GetLongPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
        GetLongPathName.restype = DWORD

        GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
        GetShortPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
        GetShortPathName.restype = DWORD

        def _convert_to_long_pathname(filename):
            buf = ctypes.create_unicode_buffer(MAX_PATH)

            if IS_PY2 and isinstance(filename, str):
                filename = filename.decode(getfilesystemencoding())
            rv = GetLongPathName(filename, buf, MAX_PATH)
            if rv != 0 and rv <= MAX_PATH:
                filename = buf.value

                if IS_PY2:
                    filename = filename.encode(getfilesystemencoding())
            return filename

        def _convert_to_short_pathname(filename):
            buf = ctypes.create_unicode_buffer(MAX_PATH)

            if IS_PY2 and isinstance(filename, str):
                filename = filename.decode(getfilesystemencoding())
            rv = GetShortPathName(filename, buf, MAX_PATH)
            if rv != 0 and rv <= MAX_PATH:
                filename = buf.value

                if IS_PY2:
                    filename = filename.encode(getfilesystemencoding())
            return filename

        def _get_path_with_real_case(filename):
            ret = convert_to_long_pathname(convert_to_short_pathname(filename))
            # This doesn't handle the drive letter properly (it'll be unchanged).
            # Make sure the drive letter is always uppercase.
            if len(ret) > 1 and ret[1] == ':' and ret[0].islower():
                return ret[0].upper() + ret[1:]
            return ret

        # Check that it actually works
        _get_path_with_real_case(__file__)
    except:
        # Something didn't quite work out, leave no-op conversions in place.
        if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
            traceback.print_exc()
    else:
        convert_to_long_pathname = _convert_to_long_pathname
        convert_to_short_pathname = _convert_to_short_pathname
        get_path_with_real_case = _get_path_with_real_case

elif IS_JYTHON and IS_WINDOWS:

    def get_path_with_real_case(filename):
        from java.io import File
        f = File(filename)
        ret = f.getCanonicalPath()
        if IS_PY2 and not isinstance(ret, str):
            return ret.encode(getfilesystemencoding())
        return ret

if IS_WINDOWS:
    if IS_JYTHON:

        def normcase(filename):
            return filename.lower()

    else:

        def normcase(filename):
            # `normcase` doesn't lower case on Python 2 for non-English locale, but Java
            # side does it, so we should do it manually.
            if '~' in filename:
                filename = convert_to_long_pathname(filename)

            filename = _os_normcase(filename)
            return filename.lower()

else:

    def normcase(filename):
        return filename  # no-op

_ide_os = 'WINDOWS' if IS_WINDOWS else 'UNIX'


def set_ide_os(os):
    '''
    We need to set the IDE os because the host where the code is running may be
    actually different from the client (and the point is that we want the proper
    paths to translate from the client to the server).

    :param os:
        'UNIX' or 'WINDOWS'
    '''
    global _ide_os
    prev = _ide_os
    if os == 'WIN':  # Apparently PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116)
        os = 'WINDOWS'

    assert os in ('WINDOWS', 'UNIX')

    if prev != os:
        _ide_os = os
        # We need to (re)setup how the client <-> server translation works to provide proper separators.
        setup_client_server_paths(_last_client_server_paths_set)
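# Illustrative sketch (not part of the original module; kept commented out so
# importing this file stays side-effect free): how the case/IDE helpers above
# would typically be exercised. The Windows path below is hypothetical.
#
#     # On Windows, recover the on-disk casing of a mis-cased path
#     # (a no-op on platforms where the ctypes setup above did not succeed):
#     proper = get_path_with_real_case('c:\\my_project\\SRC')
#
#     # An IDE on Windows attached to this process announces its platform so
#     # that separators are translated properly ('WIN' is accepted as an
#     # alias of 'WINDOWS'):
#     set_ide_os('WIN')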
DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in ('1', 'true')

# Caches filled as requested during the debug session.
NORM_PATHS_CONTAINER = {}
NORM_PATHS_AND_BASE_CONTAINER = {}


def _NormFile(filename):
    abs_path, real_path = _NormPaths(filename)
    return real_path


def _AbsFile(filename):
    abs_path, real_path = _NormPaths(filename)
    return abs_path


# Returns tuple of absolute path and real path for given filename
def _NormPaths(filename):
    try:
        return NORM_PATHS_CONTAINER[filename]
    except KeyError:
        if filename.__class__ != str:
            raise AssertionError('Paths passed to _NormPaths must be str. Found: %s (%s)' % (filename, type(filename)))
        abs_path = _NormPath(filename, os.path.abspath)
        real_path = _NormPath(filename, rPath)

        # cache it for fast access later
        NORM_PATHS_CONTAINER[filename] = abs_path, real_path
        return abs_path, real_path


def _NormPath(filename, normpath):
    r = normpath(filename)
    ind = r.find('.zip')
    if ind == -1:
        ind = r.find('.egg')
    if ind != -1:
        ind += 4
        zip_path = r[:ind]
        inner_path = r[ind:]
        if inner_path.startswith('!'):
            # Note (fabioz): although I can replicate this by creating a file ending as
            # .zip! or .egg!, I don't really know what's the real-world case for this
            # (still kept as it was added by @jetbrains, but it should probably be
            # reviewed later on).
            # Note 2: it goes hand-in-hand with 'exists'.
            inner_path = inner_path[1:]
            zip_path = zip_path + '!'

        if inner_path.startswith('/') or inner_path.startswith('\\'):
            inner_path = inner_path[1:]
        if inner_path:
            r = join(normcase(zip_path), inner_path)
            return r

    r = normcase(r)
    return r


_ZIP_SEARCH_CACHE = {}
_NOT_FOUND_SENTINEL = object()


def exists(file):
    if os.path.exists(file):
        return file

    ind = file.find('.zip')
    if ind == -1:
        ind = file.find('.egg')

    if ind != -1:
        ind += 4
        zip_path = file[:ind]
        inner_path = file[ind:]
        if inner_path.startswith("!"):
            # Note (fabioz): although I can replicate this by creating a file ending as
            # .zip! or .egg!, I don't really know what's the real-world case for this
            # (still kept as it was added by @jetbrains, but it should probably be
            # reviewed later on).
            # Note 2: it goes hand-in-hand with '_NormPath'.
            inner_path = inner_path[1:]
            zip_path = zip_path + '!'

        zip_file_obj = _ZIP_SEARCH_CACHE.get(zip_path, _NOT_FOUND_SENTINEL)
        if zip_file_obj is None:
            return False
        elif zip_file_obj is _NOT_FOUND_SENTINEL:
            try:
                import zipfile
                zip_file_obj = zipfile.ZipFile(zip_path, 'r')
                _ZIP_SEARCH_CACHE[zip_path] = zip_file_obj
            except:
                _ZIP_SEARCH_CACHE[zip_path] = _NOT_FOUND_SENTINEL
                return False

        try:
            if inner_path.startswith('/') or inner_path.startswith('\\'):
                inner_path = inner_path[1:]

            _info = zip_file_obj.getinfo(inner_path.replace('\\', '/'))

            return join(zip_path, inner_path)
        except KeyError:
            return None

    return None


# Now, let's do a quick test to see if we're working with a version of python that
# has no problems related to the names generated...
try:
    try:
        code = rPath.func_code
    except AttributeError:
        code = rPath.__code__
    if not exists(_NormFile(code.co_filename)):
        sys.stderr.write('-------------------------------------------------------------------------------\n')
        sys.stderr.write('pydev debugger: CRITICAL WARNING: This version of python seems to be incorrectly compiled (internal generated filenames are not absolute)\n')
        sys.stderr.write('pydev debugger: The debugger may still function, but it will work slower and may miss breakpoints.\n')
        sys.stderr.write('pydev debugger: Related bug: http://bugs.python.org/issue1666807\n')
        sys.stderr.write('-------------------------------------------------------------------------------\n')
        sys.stderr.flush()

        NORM_SEARCH_CACHE = {}

        initial_norm_paths = _NormPaths

        def _NormPaths(filename):  # Let's redefine _NormPaths to work with paths that may be incorrect
            try:
                return NORM_SEARCH_CACHE[filename]
            except KeyError:
                abs_path, real_path = initial_norm_paths(filename)
                if not exists(real_path):
                    # We must actually go on and check if we can find it as if it was a
                    # relative path for some of the paths in the pythonpath.
                    for path in sys.path:
                        abs_path, real_path = initial_norm_paths(join(path, filename))
                        if exists(real_path):
                            break
                    else:
                        sys.stderr.write('pydev debugger: Unable to find real location for: %s\n' % (filename,))
                        abs_path = filename
                        real_path = filename

                NORM_SEARCH_CACHE[filename] = abs_path, real_path
                return abs_path, real_path

except:
    # Don't fail if there's something not correct here -- but at least print it
    # to the user so that we can correct that.
    traceback.print_exc()
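# Illustrative sketch (not part of the original module; kept commented out):
# how `_NormPath` and `exists` treat paths that point inside a .zip/.egg.
# The archive path below is hypothetical.
#
#     # '/libs/dist.egg/pkg/mod.py' is split into the archive '/libs/dist.egg'
#     # and the inner path 'pkg/mod.py'; `exists` then checks the inner path
#     # against the archive's table of contents instead of the filesystem:
#     found = exists('/libs/dist.egg/pkg/mod.py')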
# Note: as these functions may be rebound, users should always import
# pydevd_file_utils and then use:
#
# pydevd_file_utils.norm_file_to_client
# pydevd_file_utils.norm_file_to_server
#
# instead of importing any of those names to a given scope.


def _original_file_to_client(filename, cache={}):
    try:
        return cache[filename]
    except KeyError:
        cache[filename] = get_path_with_real_case(_AbsFile(filename))
    return cache[filename]


_original_file_to_server = _NormFile

norm_file_to_client = _original_file_to_client
norm_file_to_server = _original_file_to_server


def _fix_path(path, sep):
    if path.endswith('/') or path.endswith('\\'):
        path = path[:-1]

    if sep != '/':
        path = path.replace('/', sep)
    return path


_last_client_server_paths_set = []


def setup_client_server_paths(paths):
    '''paths is the same format as PATHS_FROM_ECLIPSE_TO_PYTHON'''

    global norm_file_to_client
    global norm_file_to_server
    global _last_client_server_paths_set
    _last_client_server_paths_set = paths[:]

    # Work on the client and server slashes.
    python_sep = '\\' if IS_WINDOWS else '/'
    eclipse_sep = '\\' if _ide_os == 'WINDOWS' else '/'

    norm_filename_to_server_container = {}
    norm_filename_to_client_container = {}
    initial_paths = list(paths)
    paths_from_eclipse_to_python = initial_paths[:]

    # Apply normcase to the existing paths to follow the os preferences.
    for i, (path0, path1) in enumerate(paths_from_eclipse_to_python[:]):
        if IS_PY2:
            if isinstance(path0, unicode):
                path0 = path0.encode(sys.getfilesystemencoding())
            if isinstance(path1, unicode):
                path1 = path1.encode(sys.getfilesystemencoding())

        path0 = _fix_path(path0, eclipse_sep)
        path1 = _fix_path(path1, python_sep)
        initial_paths[i] = (path0, path1)

        paths_from_eclipse_to_python[i] = (normcase(path0), normcase(path1))

    if not paths_from_eclipse_to_python:
        # no translation step needed (just inline the calls)
        norm_file_to_client = _original_file_to_client
        norm_file_to_server = _original_file_to_server
        return

    # only setup translation functions if absolutely needed!
    def _norm_file_to_server(filename, cache=norm_filename_to_server_container):
        # Eclipse will send the passed filename to be translated to the python process
        # So, this would be 'NormFileFromEclipseToPython'
        try:
            return cache[filename]
        except KeyError:
            if eclipse_sep != python_sep:
                # Make sure that the separators are what we expect from the IDE.
                filename = filename.replace(python_sep, eclipse_sep)

            # used to translate a path from the client to the debug server
            translated = normcase(filename)
            for eclipse_prefix, server_prefix in paths_from_eclipse_to_python:
                if translated.startswith(eclipse_prefix):
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: replacing to server: %s\n' % (translated,))
                    translated = translated.replace(eclipse_prefix, server_prefix)
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: sent to server: %s\n' % (translated,))
                    break
            else:
                if DEBUG_CLIENT_SERVER_TRANSLATION:
                    sys.stderr.write('pydev debugger: to server: unable to find matching prefix for: %s in %s\n' % \
                        (translated, [x[0] for x in paths_from_eclipse_to_python]))

            # Note that when going to the server, we do the replace first and only
            # later do the norm file.
            if eclipse_sep != python_sep:
                translated = translated.replace(eclipse_sep, python_sep)
            translated = _NormFile(translated)

            cache[filename] = translated
            return translated

    def _norm_file_to_client(filename, cache=norm_filename_to_client_container):
        # The result of this method will be passed to eclipse
        # So, this would be 'NormFileFromPythonToEclipse'
        try:
            return cache[filename]
        except KeyError:
            # used to translate a path from the debug server to the client
            translated = _NormFile(filename)

            # After getting the real path, let's get it with the path with
            # the real case and then obtain a new normalized copy, just in case
            # the path is different now.
            translated_proper_case = get_path_with_real_case(translated)
            translated = _NormFile(translated_proper_case)

            if IS_WINDOWS:
                if translated.lower() != translated_proper_case.lower():
                    translated_proper_case = translated
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write(
                            'pydev debugger: _NormFile changed path (from: %s to %s)\n' % (
                                translated_proper_case, translated))

            for i, (eclipse_prefix, python_prefix) in enumerate(paths_from_eclipse_to_python):
                if translated.startswith(python_prefix):
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: replacing to client: %s\n' % (translated,))

                    # Note: use the non-normalized version.
                    eclipse_prefix = initial_paths[i][0]
                    translated = eclipse_prefix + translated_proper_case[len(python_prefix):]

                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: sent to client: %s\n' % (translated,))
                    break
            else:
                if DEBUG_CLIENT_SERVER_TRANSLATION:
                    sys.stderr.write('pydev debugger: to client: unable to find matching prefix for: %s in %s\n' % \
                        (translated, [x[1] for x in paths_from_eclipse_to_python]))
                translated = translated_proper_case

            if eclipse_sep != python_sep:
                translated = translated.replace(python_sep, eclipse_sep)

            # The resulting path is not in the python process, so, we cannot do a
            # _NormFile here, only at the beginning of this method.
            cache[filename] = translated
            return translated

    norm_file_to_server = _norm_file_to_server
    norm_file_to_client = _norm_file_to_client


setup_client_server_paths(PATHS_FROM_ECLIPSE_TO_PYTHON)
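# Illustrative round trip (not part of the original module; kept commented
# out), using the same hypothetical mapping as the module docstring:
#
#     setup_client_server_paths([(r'c:\my_project\src', r'/user/projects/my_project/src')])
#
#     # IDE (client) breakpoint path -> path in this python process (server):
#     norm_file_to_server(r'c:\my_project\src\package\module1.py')
#     # -> '/user/projects/my_project/src/package/module1.py'
#
#     # server path -> path reported back to the IDE:
#     norm_file_to_client('/user/projects/my_project/src/package/module1.py')
#     # -> 'c:\\my_project\\src\\package\\module1.py'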
def _is_int(filename):
    # isdigit() doesn't support negative numbers
    try:
        int(filename)
        return True
    except:
        return False


def is_real_file(filename):
    # Check for Jupyter cells
    return not _is_int(filename) and not filename.startswith("<ipython-input")


# For given file f returns tuple of its absolute path, real path and base name
def get_abs_path_real_path_and_base_from_file(f):
    try:
        return NORM_PATHS_AND_BASE_CONTAINER[f]
    except:
        if _NormPaths is None:  # Interpreter shutdown
            return f

        if f is not None:
            if f.endswith('.pyc'):
                f = f[:-1]
            elif f.endswith('$py.class'):
                f = f[:-len('$py.class')] + '.py'

        if not is_real_file(f):
            abs_path, real_path, base = f, f, f
        else:
            abs_path, real_path = _NormPaths(f)
            base = basename(real_path)
        ret = abs_path, real_path, base
        NORM_PATHS_AND_BASE_CONTAINER[f] = ret
        return ret


def get_abs_path_real_path_and_base_from_frame(frame):
    try:
        return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
    except:
        # This one is just internal (so, does not need any kind of client-server translation)
        f = frame.f_code.co_filename
        if f is not None and f.startswith(('build/bdist.', 'build\\bdist.')):
            # files from eggs in Python 2.7 have paths like build/bdist.linux-x86_64/egg/<path-inside-egg>
            f = frame.f_globals['__file__']

        if get_abs_path_real_path_and_base_from_file is None:
            # Interpreter shutdown
            return f

        ret = get_abs_path_real_path_and_base_from_file(f)
        # Also cache based on the frame.f_code.co_filename (if we had it inside
        # build/bdist it can make a difference).
        NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] = ret
        return ret


def get_fullname(mod_name):
    if IS_PY3K:
        import pkgutil
    else:
        from _pydev_imps import _pydev_pkgutil_old as pkgutil
    try:
        loader = pkgutil.get_loader(mod_name)
    except:
        return None
    if loader is not None:
        for attr in ("get_filename", "_get_filename"):
            meth = getattr(loader, attr, None)
            if meth is not None:
                return meth(mod_name)
    return None


def get_package_dir(mod_name):
    for path in sys.path:
        mod_path = join(path, mod_name.replace('.', '/'))
        if os.path.isdir(mod_path):
            return mod_path
    return None
eclipse_prefix = initial_paths[i][0] translated = eclipse_prefix", "= File(filename) ret = f.getCanonicalPath() if IS_PY2 and not isinstance(ret,", "must actually go on and check if we can find", "I can replicate this by creating a file ending as", "from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K, DebugInfoHolder, IS_WINDOWS, IS_JYTHON from _pydev_bundle._pydev_filesystem_encoding", "and IS_WINDOWS: def get_path_with_real_case(filename): from java.io import File f =", "not correct here -- but at least print it to", "return filename # no-op _ide_os = 'WINDOWS' if IS_WINDOWS else", "system being debugged is case-independent, 'normcase()' should be used on", "# Now, let's do a quick test to see if", "translate a path from the client to the debug server", "from ctypes.wintypes import MAX_PATH, LPCWSTR, LPWSTR, DWORD GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW", "client translated = _NormFile(filename) # After getting the real path,", "r'/user/projects/my_project/src')] alternatively, this can be set with an environment variable", "just in case # the path is different now. translated_proper_case", "'UNIX' or 'WINDOWS' ''' global _ide_os prev = _ide_os if", "inner_path = file[ind:] if inner_path.startswith(\"!\"): # Note (fabioz): although I", "inner_path = inner_path[1:] zip_path = zip_path + '!' if inner_path.startswith('/')", "get_path_with_real_case = lambda filename:filename if sys.platform == 'win32': try: import", "= join(normcase(zip_path), inner_path) return r r = normcase(r) return r", "the os preferences. for i, (path0, path1) in enumerate(paths_from_eclipse_to_python[:]): if", "and may miss breakpoints.\\n') sys.stderr.write('pydev debugger: Related bug: http://bugs.python.org/issue1666807\\n') sys.stderr.write('-------------------------------------------------------------------------------\\n')", "in paths_from_eclipse_to_python])) # Note that when going to the server,", "see module docstring for more details. try: PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON',", "elif zip_file_obj is _NOT_FOUND_SENTINEL: try: import zipfile zip_file_obj = zipfile.ZipFile(zip_path,", "a quick test to see if we're working with a", "else '/' norm_filename_to_server_container = {} norm_filename_to_client_container = {} initial_paths =", "= DWORD def _convert_to_long_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH) if IS_PY2 and", "r.find('.zip') if ind == -1: ind = r.find('.egg') if ind", "the python process # So, this would be 'NormFileFromEclipseToPython' try:", "make path conversions when doing a remote debugging session in", "for eclipse_prefix, server_prefix in paths_from_eclipse_to_python: if translated.startswith(eclipse_prefix): if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev", "initial_norm_paths(join(path, filename)) if exists(real_path): break else: sys.stderr.write('pydev debugger: Unable to", "to be incorrectly compiled (internal generated filenames are not absolute)\\n')", "get_abs_path_real_path_and_base_from_file is None: # Interpreter shutdown return f ret =", "f.endswith('$py.class'): f = f[:-len('$py.class')] + '.py' if not is_real_file(f): abs_path,", "_pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K, DebugInfoHolder, IS_WINDOWS, IS_JYTHON from _pydev_bundle._pydev_filesystem_encoding import", "of python that has no problems # related to the", "those names to a given scope. def _original_file_to_client(filename, cache={}): try:", "only at the beginning of this method. 
cache[filename] = translated", "if IS_PY2: filename = filename.encode(getfilesystemencoding()) return filename def _convert_to_short_pathname(filename): buf", "zip_file_obj is _NOT_FOUND_SENTINEL: try: import zipfile zip_file_obj = zipfile.ZipFile(zip_path, 'r')", "# used to translate a path from the debug server", "work out, leave no-op conversions in place. if DebugInfoHolder.DEBUG_TRACE_LEVEL >", "prev != os: _ide_os = os # We need to", "cache[filename] except KeyError: # used to translate a path from", "for i, (path0, path1) in enumerate(paths_from_eclipse_to_python[:]): if IS_PY2: if isinstance(path0,", "# We need to (re)setup how the client <-> server", "to get the absolute filenames so that we can be", "we can find it as if it was a relative", "Work on the client and server slashes. python_sep = '\\\\'", "be rebound, users should always import # pydevd_file_utils and then", "of this method will be passed to eclipse # So,", "= initial_paths[i][0] translated = eclipse_prefix + translated_proper_case[len(python_prefix):] if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write('pydev", "this by creating a file ending as # .zip! or", "or inner_path.startswith('\\\\'): inner_path = inner_path[1:] if inner_path: r = join(normcase(zip_path),", "inner_path.startswith('\\\\'): inner_path = inner_path[1:] if inner_path: r = join(normcase(zip_path), inner_path)", "absolute)\\n') sys.stderr.write('pydev debugger: The debugger may still function, but it", "filename.decode(getfilesystemencoding()) rv = GetShortPathName(filename, buf, MAX_PATH) if rv != 0", "requested during the debug session. NORM_PATHS_CONTAINER = {} NORM_PATHS_AND_BASE_CONTAINER =", "filename.encode(getfilesystemencoding()) return filename def _convert_to_short_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH) if IS_PY2", "case-dependent filesystem (if the system being debugged is case-independent, 'normcase()'", "+ '.py' if not is_real_file(f): abs_path, real_path, base = f,", "be translated to the python process # So, this would", "(it'll be unchanged). # Make sure the drive letter is", "json import os.path import sys import traceback _os_normcase = os.path.normcase", "[(r'c:\\my_project\\src', r'/user/projects/my_project/src')] alternatively, this can be set with an environment", "_NOT_FOUND_SENTINEL) if zip_file_obj is None: return False elif zip_file_obj is", "and ret[0].islower(): return ret[0].upper() + ret[1:] return ret # Check", "resulting path is not in the python process, so, we", "to translate a path from the debug server to the", "IS_PY2 and isinstance(filename, str): filename = filename.decode(getfilesystemencoding()) rv = GetShortPathName(filename,", "f returns tuple of its absolute path, real path and", "translated_proper_case = translated if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write( 'pydev debugger: _NormFile changed", "lambda filename:filename convert_to_short_pathname = lambda filename:filename get_path_with_real_case = lambda filename:filename", "import ctypes from ctypes.wintypes import MAX_PATH, LPCWSTR, LPWSTR, DWORD GetLongPathName", "translate from the client to the server). :param os: 'UNIX'", "not need any kind of client-server translation) f = frame.f_code.co_filename", "can correct that traceback.print_exc() # Note: as these functions may", "server (your python process) has the structure /user/projects/my_project/src/package/module1.py and the", "actually have breakpoints). 
''' from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K, DebugInfoHolder,", "no-op conversions in place. if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2: traceback.print_exc() else:", "in the server machine. # see module docstring for more", "be 'NormFileFromPythonToEclipse' try: return cache[filename] except KeyError: # used to", "# Work on the client and server slashes. python_sep =", "translated.lower() != translated_proper_case.lower(): translated_proper_case = translated if DEBUG_CLIENT_SERVER_TRANSLATION: sys.stderr.write( 'pydev", "ctypes from ctypes.wintypes import MAX_PATH, LPCWSTR, LPWSTR, DWORD GetLongPathName =", "DWORD def _convert_to_long_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH) if IS_PY2 and isinstance(filename,", "NORM_PATHS_CONTAINER[filename] = abs_path, real_path return abs_path, real_path def _NormPath(filename, normpath):", "(and the point is that we want the proper paths", "a list.\\n') PATHS_FROM_ECLIPSE_TO_PYTHON = [] else: # Converting json lists", "to the names generated... try: try: code = rPath.func_code except", "hit). - Providing means for the user to make path", "= filename.encode(getfilesystemencoding()) return filename def _get_path_with_real_case(filename): ret = convert_to_long_pathname(convert_to_short_pathname(filename)) #", "and base name def get_abs_path_real_path_and_base_from_file(f): try: return NORM_PATHS_AND_BASE_CONTAINER[f] except: if", "def setup_client_server_paths(paths): '''paths is the same format as PATHS_FROM_ECLIPSE_TO_PYTHON''' global", "NORM_SEARCH_CACHE[filename] except KeyError: abs_path, real_path = initial_norm_paths(filename) if not exists(real_path):", "not None: return meth(mod_name) return None def get_package_dir(mod_name): for path", "if absolutely needed! def _norm_file_to_server(filename, cache=norm_filename_to_server_container): # Eclipse will send", "(filename, type(filename))) abs_path = _NormPath(filename, os.path.abspath) real_path = _NormPath(filename, rPath)", "except KeyError: if eclipse_sep != python_sep: # Make sure that", "# Note that when going to the server, we do", "of python seems to be incorrectly compiled (internal generated filenames", "to server: %s\\n' % (translated,)) translated = translated.replace(eclipse_prefix, server_prefix) if", "= f[:-len('$py.class')] + '.py' if not is_real_file(f): abs_path, real_path, base", "json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]')) except Exception: sys.stderr.write('Error loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment variable.\\n')", "+= 4 zip_path = file[:ind] inner_path = file[ind:] if inner_path.startswith(\"!\"):", "to the existing paths to follow the os preferences. for", "getfilesystemencoding import json import os.path import sys import traceback _os_normcase", "else 'UNIX' def set_ide_os(os): ''' We need to set the", "else: sys.stderr.write('pydev debugger: Unable to find real location for: %s\\n'", "return NORM_SEARCH_CACHE[filename] except KeyError: abs_path, real_path = initial_norm_paths(filename) if not", "<-> server translation works to provide proper separators. setup_client_server_paths(_last_client_server_paths_set) DEBUG_CLIENT_SERVER_TRANSLATION", "be found in the server) @note: to enable remote debugging", "it should probably be reviewed # later on). 
# Note", "with # the real case and then obtain a new", "_convert_to_long_pathname convert_to_short_pathname = _convert_to_short_pathname get_path_with_real_case = _get_path_with_real_case elif IS_JYTHON and", "http://bugs.python.org/issue1666807\\n') sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.flush() NORM_SEARCH_CACHE = {} initial_norm_paths = _NormPaths def", "'/' norm_filename_to_server_container = {} norm_filename_to_client_container = {} initial_paths = list(paths)", "a list of tuples where the 1st element of the", "the PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with the appropriate paths.", "with 'exists'. inner_path = inner_path[1:] zip_path = zip_path + '!'", "= rPath.__code__ if not exists(_NormFile(code.co_filename)): sys.stderr.write('-------------------------------------------------------------------------------\\n') sys.stderr.write('pydev debugger: CRITICAL WARNING:", "sent to server: %s\\n' % (translated,)) break else: if DEBUG_CLIENT_SERVER_TRANSLATION:", "str): return ret.encode(getfilesystemencoding()) return ret if IS_WINDOWS: if IS_JYTHON: def", "filesystem (if the system being debugged is case-independent, 'normcase()' should", "rPath.func_code except AttributeError: code = rPath.__code__ if not exists(_NormFile(code.co_filename)): sys.stderr.write('-------------------------------------------------------------------------------\\n')", "% ( translated_proper_case, translated)) for i, (eclipse_prefix, python_prefix) in enumerate(paths_from_eclipse_to_python):", "= _NormPaths(filename) return abs_path # Returns tuple of absolute path", "path1) in enumerate(paths_from_eclipse_to_python[:]): if IS_PY2: if isinstance(path0, unicode): path0 =", "python_sep: # Make sure that the separators are what we", "and the client is where eclipse is running. E.g.: If", "_NormFile(translated) cache[filename] = translated return translated def _norm_file_to_client(filename, cache=norm_filename_to_client_container): #", "pydevd_file_utils.norm_file_to_client # pydevd_file_utils.norm_file_to_server # # instead of importing any of", "host where the code is running may be actually different", "try: import zipfile zip_file_obj = zipfile.ZipFile(zip_path, 'r') _ZIP_SEARCH_CACHE[zip_path] = zip_file_obj", "inner_path) except KeyError: return None return None # Now, let's", "drive letter is always uppercase. if len(ret) > 1 and", "normpath(filename) ind = r.find('.zip') if ind == -1: ind =", "eclipse_sep = '\\\\' if _ide_os == 'WINDOWS' else '/' norm_filename_to_server_container", "relative path for some of the paths in the pythonpath", "running. E.g.: If the server (your python process) has the", "if not paths_from_eclipse_to_python: # no translation step needed (just inline", "# So, this would be 'NormFileFromEclipseToPython' try: return cache[filename] except" ]
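# --- usage sketch (not part of the original module) --------------------------
# A minimal demo of the client <-> server translation above, reusing the prefix
# pair from the module docstring. The exact output depends on the local OS and
# filesystem, because _NormFile applies abspath/realpath and normcase.
if __name__ == '__main__':
    set_ide_os('WINDOWS')  # pretend the IDE side is a Windows machine
    setup_client_server_paths([(r'c:\my_project\src', r'/user/projects/my_project/src')])

    # IDE (client) breakpoint path -> path as seen by this python process;
    # on a POSIX host this prints roughly /user/projects/my_project/src/package/module1.py
    print(norm_file_to_server(r'c:\my_project\src\package\module1.py'))

    # Server-side frame filename -> path to report back to the IDE;
    # on a POSIX host this prints roughly c:\my_project\src\package\module1.py
    print(norm_file_to_client('/user/projects/my_project/src/package/module1.py'))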
import os
import socket
from random import randint

from src import Constants
from src.Constants import Network
from src.networking import NetworkPackets, Actions
from src.networking.Client import Client
from src.utils.DH_Encryption import Encryption
from src.utils.Enum import Enum


class SessionManager:
    """
    This class is responsible for dealing with any flow of net msgs.
    """

    def __init__(self):
        address = (Network.SERVER_IP, Network.SERVER_PORT)
        self.client = Client(str(socket.gethostname()), address)
        self.val = self.client.connect()
        if not self.val:
            Network.IS_ONLINE = False

    def go_crypto(self):
        msg = NetworkPackets.split(self.client.receive())
        g = int(msg[1])
        n = int(msg[2])
        g_pow_a_mod_n = int(msg[3])
        crypto = Encryption(g, n)
        crypto.get_full_key(g_pow_a_mod_n)
        self.client.send(NetworkPackets.assemble(NetworkPackets.NetLogicIncomes.CONNECT.value,
                                                 str(crypto.get_partial_key())))
        self.client.crypto = crypto

    def gen_id(self) -> str:
        num = str(randint(1, 9999))
        num = num.zfill(4)
        return num

    def open_id_file(self):
        try:
            open(Constants.Files.ID, 'r+').close()
        except FileNotFoundError:
            open(Constants.Files.ID, 'x').close()
        finally:
            file = open(Constants.Files.ID, 'r+')
        return file

    def sync(self):
        """
        This function contains the full process of the sync phase.
        """
        if Network.IS_ONLINE:
            self.go_crypto()
            num = ""
            file = self.open_id_file()
            if os.path.getsize(Constants.Files.ID) == 0:  # Empty
                is_valid = False
                while not is_valid:
                    num = self.gen_id()
                    self.client.send(NetworkPackets.assemble("COMPUTER", "ID_VAL", num))
                    msg = NetworkPackets.split(self.client.receive())
                    is_valid = msg[0] == NetworkPackets.NetLogicIncomes.VALID.value
                file.write(num)
            else:
                is_valid = False
                num = file.read()
                while not is_valid:
                    self.client.send(NetworkPackets.assemble("COMPUTER", "ID_VAL", num))
                    msg = NetworkPackets.split(self.client.receive())
                    is_valid = msg[0] == NetworkPackets.NetLogicIncomes.VALID.value
                    if not is_valid:
                        num = self.gen_id()
                if num != file.read():
                    file.close()
                    os.remove(Constants.Files.ID)
                    file = self.open_id_file()
                    file.write(num)
            file.close()

    def manage(self, incoming: str):
        """
        This function deals with the execution of the required operations.
        :param incoming: Raw net msg.
        """
        if Network.IS_ONLINE:
            incoming = NetworkPackets.split(incoming)[0]
            if incoming in Operation.list():
                if incoming == Operation.VOL_UP.value:
                    Actions.vol_up()
                elif incoming == Operation.VOL_DOWN.value:
                    Actions.vol_down()
                elif incoming == Operation.PAUSE_PLAY_TOGGLE.value:
                    Actions.play_pause()
                elif incoming == Operation.SKIP.value:
                    Actions.next_song()
                elif incoming == Operation.PREV.value:
                    Actions.prev_song()
                elif incoming == Operation.MUTE.value:
                    Actions.mute()
                elif incoming == Operation.OFF.value:
                    Actions.shut_down()
                elif incoming == Operation.SLEEP.value:
                    Actions.sleep()
                elif incoming == Operation.RESTART.value:
                    Actions.restart()
                elif incoming == Operation.LOCK.value:
                    Actions.lock()
                elif incoming == Operation.LOG_OUT.value:
                    Actions.log_out()
                elif incoming == Operation.MAGIC_BTN.value:
                    Actions.run_file()
                elif incoming == Operation.USAGE.value:
                    self.client.send(NetworkPackets.assemble(arr=Actions.COMPUTER.get_use_as_str_arr()))
                elif incoming == Operation.DISCONNECT.value:
                    self.client.send(NetworkPackets.assemble(Operation.DISCONNECT.value))
                    return Operation.DISCONNECT
            elif incoming in NetworkPackets.NetLogicIncomes.list():
                if incoming == NetworkPackets.NetLogicIncomes.PAIRED.value:
                    Constants.Network.IS_PAIRING = True
                    self.client.send(NetworkPackets.assemble(arr=Actions.COMPUTER.get_specs_as_str_arr()))
                elif incoming == NetworkPackets.NetLogicIncomes.INVALID:
                    pass


class Operation(Enum):
    """
    All the operations that can be asked to execute.
    """
    VOL_UP = "VOL_UP"
    VOL_DOWN = "VOL_DOWN"
    PAUSE_PLAY_TOGGLE = "PTT"
    SKIP = "SKIP"
    PREV = "PREV"
    MUTE = "MUTE"
    OFF = "OFF"
    SLEEP = "SLEEP"
    RESTART = "RESTRT"
    LOCK = "LCK"
    LOG_OUT = "LGOT"
    DISCONNECT = "DISCON"
    MAGIC_BTN = "MAGIC"
    SPECS_INFO = "SPECS"
    USAGE = "USE"
[ "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='requests', full_name='container.ResourceRequirements.requests', index=1, number=2, type=11,", "default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='requests',", "NOT EDIT! # source: resource_requirements.proto import sys _b=sys.version_info[0]<3 and (lambda", "number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "import message as _message from google.protobuf import reflection as _reflection", "= _RESOURCEREQUIREMENTS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), { 'DESCRIPTOR' :", "protocol buffer compiler. DO NOT EDIT! # source: resource_requirements.proto import", "import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from", "_message from google.protobuf import reflection as _reflection from google.protobuf import", "index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None,", "source: resource_requirements.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda", "_RESOURCEREQUIREMENTS = _descriptor.Descriptor( name='ResourceRequirements', full_name='container.ResourceRequirements', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor(", "file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3',", "compiler. DO NOT EDIT! 
# source: resource_requirements.proto import sys _b=sys.version_info[0]<3", "from influxdb_service_sdk.model.container import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='resource_requirements.proto',", "'resource_requirements_pb2' # @@protoc_insertion_point(class_scope:container.ResourceRequirements) }) _sym_db.RegisterMessage(ResourceRequirements) DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope)", "<filename>influxdb_service_sdk/model/container/resource_requirements_pb2.py # -*- coding: utf-8 -*- # Generated by the", "serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'), serialized_pb=_b('\\n\\x1bresource_requirements.proto\\x12\\tcontainer\\x1a\\x38influxdb_service_sdk/model/container/resource_list.proto\\\"j\\n\\x14ResourceRequirements\\x12\\'\\n\\x06limits\\x18\\x01 \\x01(\\x0b\\x32\\x17.container.ResourceList\\x12)\\n\\x08requests\\x18\\x02 \\x01(\\x0b\\x32\\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\\x06proto3') , dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,]) _RESOURCEREQUIREMENTS = _descriptor.Descriptor( name='ResourceRequirements',", "@@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from influxdb_service_sdk.model.container import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2", "symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from influxdb_service_sdk.model.container", "serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False,", "# source: resource_requirements.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or", "_RESOURCEREQUIREMENTS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), { 'DESCRIPTOR' : _RESOURCEREQUIREMENTS,", "by the protocol buffer compiler. DO NOT EDIT! 
# source:", "full_name='container.ResourceRequirements.limits', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None,", "(lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf", "type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "= _symbol_database.Default() from influxdb_service_sdk.model.container import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2 DESCRIPTOR =", "_symbol_database.Default() from influxdb_service_sdk.model.container import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2 DESCRIPTOR = _descriptor.FileDescriptor(", "file=DESCRIPTOR), _descriptor.FieldDescriptor( name='requests', full_name='container.ResourceRequirements.requests', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False,", "= _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), { 'DESCRIPTOR' : _RESOURCEREQUIREMENTS, '__module__' : 'resource_requirements_pb2'", "name='ResourceRequirements', full_name='container.ResourceRequirements', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='limits', full_name='container.ResourceRequirements.limits', index=0,", "_RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements',", "_sym_db.RegisterFileDescriptor(DESCRIPTOR) ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), { 'DESCRIPTOR' : _RESOURCEREQUIREMENTS, '__module__'", "_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import", "import reflection as _reflection from google.protobuf import symbol_database as _symbol_database", "as _descriptor from google.protobuf import message as _message from google.protobuf", "ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), { 'DESCRIPTOR' : _RESOURCEREQUIREMENTS, '__module__' :", "influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='resource_requirements.proto', package='container', syntax='proto3', serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'), serialized_pb=_b('\\n\\x1bresource_requirements.proto\\x12\\tcontainer\\x1a\\x38influxdb_service_sdk/model/container/resource_list.proto\\\"j\\n\\x14ResourceRequirements\\x12\\'\\n\\x06limits\\x18\\x01 \\x01(\\x0b\\x32\\x17.container.ResourceList\\x12)\\n\\x08requests\\x18\\x02", "\\x01(\\x0b\\x32\\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\\x06proto3') , dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,]) _RESOURCEREQUIREMENTS = _descriptor.Descriptor( name='ResourceRequirements', full_name='container.ResourceRequirements', filename=None, file=DESCRIPTOR,", "cpp_type=10, label=1, has_default_value=False, default_value=None, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='resource_requirements.proto', package='container', syntax='proto3', serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),", "import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='resource_requirements.proto', package='container', syntax='proto3',", "as _message from google.protobuf import reflection as _reflection from google.protobuf", "filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='limits', full_name='container.ResourceRequirements.limits', index=0, number=1, type=11,", "extension_ranges=[], oneofs=[ ], serialized_start=100, serialized_end=206, ) _RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST _RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type", "from google.protobuf import message as _message from google.protobuf import reflection", "EDIT! # source: resource_requirements.proto import sys _b=sys.version_info[0]<3 and (lambda x:x)", "], serialized_start=100, serialized_end=206, ) _RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST _RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST", "serialized_end=206, ) _RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST _RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST DESCRIPTOR.message_types_by_name['ResourceRequirements'] =", "syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=100, serialized_end=206, ) _RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST", "# Generated by the protocol buffer compiler. 
DO NOT EDIT!", "_symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from influxdb_service_sdk.model.container import resource_list_pb2", "(lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as", "containing_type=None, fields=[ _descriptor.FieldDescriptor( name='limits', full_name='container.ResourceRequirements.limits', index=0, number=1, type=11, cpp_type=10, label=1,", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ],", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='requests', full_name='container.ResourceRequirements.requests', index=1, number=2,", "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ],", "import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from", "utf-8 -*- # Generated by the protocol buffer compiler. DO", "name='requests', full_name='container.ResourceRequirements.requests', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None,", "extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='requests', full_name='container.ResourceRequirements.requests', index=1, number=2, type=11, cpp_type=10,", "import descriptor as _descriptor from google.protobuf import message as _message", "extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[", "'__module__' : 'resource_requirements_pb2' # @@protoc_insertion_point(class_scope:container.ResourceRequirements) }) _sym_db.RegisterMessage(ResourceRequirements) DESCRIPTOR._options = None", "Generated by the protocol buffer compiler. DO NOT EDIT! 
#", "x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor", "influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST _RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ResourceRequirements =", "message as _message from google.protobuf import reflection as _reflection from", "_reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db", "label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None,", "index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None,", "coding: utf-8 -*- # Generated by the protocol buffer compiler.", ") _RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST _RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS", "-*- # Generated by the protocol buffer compiler. DO NOT", "descriptor as _descriptor from google.protobuf import message as _message from", "google.protobuf import descriptor as _descriptor from google.protobuf import message as", "serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=100, serialized_end=206, ) _RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type", "is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=100, serialized_end=206, ) _RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type =", "oneofs=[ ], serialized_start=100, serialized_end=206, ) _RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST _RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type =", "_reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), { 'DESCRIPTOR' : _RESOURCEREQUIREMENTS, '__module__' : 'resource_requirements_pb2' #", "DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), { 'DESCRIPTOR'", "x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import", "_descriptor from google.protobuf import message as _message from google.protobuf import", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='requests', full_name='container.ResourceRequirements.requests', index=1,", "google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default()", "reflection as _reflection from 
google.protobuf import symbol_database as _symbol_database #", "as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports)", "serialized_start=100, serialized_end=206, ) _RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST _RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST DESCRIPTOR.message_types_by_name['ResourceRequirements']", "influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), {", "_descriptor.Descriptor( name='ResourceRequirements', full_name='container.ResourceRequirements', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='limits', full_name='container.ResourceRequirements.limits',", ": 'resource_requirements_pb2' # @@protoc_insertion_point(class_scope:container.ResourceRequirements) }) _sym_db.RegisterMessage(ResourceRequirements) DESCRIPTOR._options = None #", "from google.protobuf import reflection as _reflection from google.protobuf import symbol_database", "full_name='container.ResourceRequirements.requests', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None,", "google.protobuf import reflection as _reflection from google.protobuf import symbol_database as", "and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor", "as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='resource_requirements.proto', package='container', syntax='proto3', serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'), serialized_pb=_b('\\n\\x1bresource_requirements.proto\\x12\\tcontainer\\x1a\\x38influxdb_service_sdk/model/container/resource_list.proto\\\"j\\n\\x14ResourceRequirements\\x12\\'\\n\\x06limits\\x18\\x01", "name='resource_requirements.proto', package='container', syntax='proto3', serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'), serialized_pb=_b('\\n\\x1bresource_requirements.proto\\x12\\tcontainer\\x1a\\x38influxdb_service_sdk/model/container/resource_list.proto\\\"j\\n\\x14ResourceRequirements\\x12\\'\\n\\x06limits\\x18\\x01 \\x01(\\x0b\\x32\\x17.container.ResourceList\\x12)\\n\\x08requests\\x18\\x02 \\x01(\\x0b\\x32\\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\\x06proto3') , dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,]) _RESOURCEREQUIREMENTS", "_RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST _RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS _sym_db.RegisterFileDescriptor(DESCRIPTOR)", "fields=[ _descriptor.FieldDescriptor( name='limits', full_name='container.ResourceRequirements.limits', index=0, 
number=1, type=11, cpp_type=10, label=1, has_default_value=False,", "(_message.Message,), { 'DESCRIPTOR' : _RESOURCEREQUIREMENTS, '__module__' : 'resource_requirements_pb2' # @@protoc_insertion_point(class_scope:container.ResourceRequirements)", "_descriptor.FieldDescriptor( name='limits', full_name='container.ResourceRequirements.limits', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None,", "package='container', syntax='proto3', serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'), serialized_pb=_b('\\n\\x1bresource_requirements.proto\\x12\\tcontainer\\x1a\\x38influxdb_service_sdk/model/container/resource_list.proto\\\"j\\n\\x14ResourceRequirements\\x12\\'\\n\\x06limits\\x18\\x01 \\x01(\\x0b\\x32\\x17.container.ResourceList\\x12)\\n\\x08requests\\x18\\x02 \\x01(\\x0b\\x32\\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\\x06proto3') , dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,]) _RESOURCEREQUIREMENTS =", "default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[", "file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='limits', full_name='container.ResourceRequirements.limits', index=0, number=1, type=11, cpp_type=10,", "= _descriptor.FileDescriptor( name='resource_requirements.proto', package='container', syntax='proto3', serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'), serialized_pb=_b('\\n\\x1bresource_requirements.proto\\x12\\tcontainer\\x1a\\x38influxdb_service_sdk/model/container/resource_list.proto\\\"j\\n\\x14ResourceRequirements\\x12\\'\\n\\x06limits\\x18\\x01 \\x01(\\x0b\\x32\\x17.container.ResourceList\\x12)\\n\\x08requests\\x18\\x02 \\x01(\\x0b\\x32\\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\\x06proto3') ,", "DO NOT EDIT! 
# source: resource_requirements.proto import sys _b=sys.version_info[0]<3 and", "has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor(", "# @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from influxdb_service_sdk.model.container import resource_list_pb2 as", "], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ],", "_descriptor.FieldDescriptor( name='requests', full_name='container.ResourceRequirements.requests', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None,", ": _RESOURCEREQUIREMENTS, '__module__' : 'resource_requirements_pb2' # @@protoc_insertion_point(class_scope:container.ResourceRequirements) }) _sym_db.RegisterMessage(ResourceRequirements) DESCRIPTOR._options", "dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,]) _RESOURCEREQUIREMENTS = _descriptor.Descriptor( name='ResourceRequirements', full_name='container.ResourceRequirements', filename=None, file=DESCRIPTOR, containing_type=None, fields=[", "= _descriptor.Descriptor( name='ResourceRequirements', full_name='container.ResourceRequirements', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='limits',", "_descriptor.FileDescriptor( name='resource_requirements.proto', package='container', syntax='proto3', serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'), serialized_pb=_b('\\n\\x1bresource_requirements.proto\\x12\\tcontainer\\x1a\\x38influxdb_service_sdk/model/container/resource_list.proto\\\"j\\n\\x14ResourceRequirements\\x12\\'\\n\\x06limits\\x18\\x01 \\x01(\\x0b\\x32\\x17.container.ResourceList\\x12)\\n\\x08requests\\x18\\x02 \\x01(\\x0b\\x32\\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\\x06proto3') , dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,])", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[", "-*- coding: utf-8 -*- # Generated by the protocol buffer", "serialized_pb=_b('\\n\\x1bresource_requirements.proto\\x12\\tcontainer\\x1a\\x38influxdb_service_sdk/model/container/resource_list.proto\\\"j\\n\\x14ResourceRequirements\\x12\\'\\n\\x06limits\\x18\\x01 \\x01(\\x0b\\x32\\x17.container.ResourceList\\x12)\\n\\x08requests\\x18\\x02 \\x01(\\x0b\\x32\\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\\x06proto3') , dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,]) _RESOURCEREQUIREMENTS = _descriptor.Descriptor( name='ResourceRequirements', full_name='container.ResourceRequirements',", ", dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,]) _RESOURCEREQUIREMENTS = _descriptor.Descriptor( name='ResourceRequirements', full_name='container.ResourceRequirements', filename=None, file=DESCRIPTOR, containing_type=None,", "from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db =", "influxdb_service_sdk.model.container import resource_list_pb2 as 
influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='resource_requirements.proto', package='container',", "as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from influxdb_service_sdk.model.container import", "syntax='proto3', serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'), serialized_pb=_b('\\n\\x1bresource_requirements.proto\\x12\\tcontainer\\x1a\\x38influxdb_service_sdk/model/container/resource_list.proto\\\"j\\n\\x14ResourceRequirements\\x12\\'\\n\\x06limits\\x18\\x01 \\x01(\\x0b\\x32\\x17.container.ResourceList\\x12)\\n\\x08requests\\x18\\x02 \\x01(\\x0b\\x32\\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\\x06proto3') , dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,]) _RESOURCEREQUIREMENTS = _descriptor.Descriptor(", "# -*- coding: utf-8 -*- # Generated by the protocol", "sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf", "], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=100, serialized_end=206, )", "the protocol buffer compiler. DO NOT EDIT! # source: resource_requirements.proto", "= influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,),", "has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ],", "\\x01(\\x0b\\x32\\x17.container.ResourceList\\x12)\\n\\x08requests\\x18\\x02 \\x01(\\x0b\\x32\\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\\x06proto3') , dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,]) _RESOURCEREQUIREMENTS = _descriptor.Descriptor( name='ResourceRequirements', full_name='container.ResourceRequirements', filename=None,", "number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "_RESOURCEREQUIREMENTS, '__module__' : 'resource_requirements_pb2' # @@protoc_insertion_point(class_scope:container.ResourceRequirements) }) _sym_db.RegisterMessage(ResourceRequirements) DESCRIPTOR._options =", "buffer compiler. DO NOT EDIT! 
# source: resource_requirements.proto import sys", "google.protobuf import message as _message from google.protobuf import reflection as", "{ 'DESCRIPTOR' : _RESOURCEREQUIREMENTS, '__module__' : 'resource_requirements_pb2' # @@protoc_insertion_point(class_scope:container.ResourceRequirements) })", "'DESCRIPTOR' : _RESOURCEREQUIREMENTS, '__module__' : 'resource_requirements_pb2' # @@protoc_insertion_point(class_scope:container.ResourceRequirements) }) _sym_db.RegisterMessage(ResourceRequirements)", "], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[],", "nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=100,", "enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=100, serialized_end=206,", "or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from", "DESCRIPTOR = _descriptor.FileDescriptor( name='resource_requirements.proto', package='container', syntax='proto3', serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'), serialized_pb=_b('\\n\\x1bresource_requirements.proto\\x12\\tcontainer\\x1a\\x38influxdb_service_sdk/model/container/resource_list.proto\\\"j\\n\\x14ResourceRequirements\\x12\\'\\n\\x06limits\\x18\\x01 \\x01(\\x0b\\x32\\x17.container.ResourceList\\x12)\\n\\x08requests\\x18\\x02 \\x01(\\x0b\\x32\\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\\x06proto3')", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[],", "name='limits', full_name='container.ResourceRequirements.limits', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None,", "resource_requirements.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))", "from google.protobuf import descriptor as _descriptor from google.protobuf import message", "full_name='container.ResourceRequirements', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='limits', full_name='container.ResourceRequirements.limits', index=0, number=1,", "serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='requests', full_name='container.ResourceRequirements.requests', index=1, number=2, type=11, cpp_type=10, label=1,", "= influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST _RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ResourceRequirements", "_sym_db = _symbol_database.Default() from influxdb_service_sdk.model.container import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2 DESCRIPTOR", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='requests', full_name='container.ResourceRequirements.requests'," ]
[ "setattr(stripe.http_client, lib, None) inst = stripe.http_client.new_default_http_client() assert isinstance(inst, expected) def", "test_should_retry_on_error(self, mocker): client = stripe.http_client.new_default_http_client() client._max_network_retries = lambda: 1 api_connection_error", "stripe.http_client.new_default_http_client() jittered_ones = set( map(lambda _: client._add_jitter_time(1), list(range(100))) ) assert", "None) response_future = asyncio.Future() response_future.set_result([\"\", 200, {\"Request-Id\": \"req_234\"}]) client.request =", "subclass\" ) return check_call def test_request(self, request_mock, mock_response, check_call): mock_response(request_mock,", "\"http://fake.url\" client = TestClient() response_future = asyncio.Future() response_future.set_result([\"\", 200, {\"Request-Id\":", "headers) def test_request_stream( self, mocker, request_mock, mock_response, check_call ): for", "a string stream others a byte stream. body_content = stream.read()", "\"req_123\"}]) client.request = mocker.MagicMock( return_value=response_future ) _, code, _ =", "import urllib3 from stripe import six, util from async_stripe.http_client import", "client._should_retry((None, 409, None), None, 0) is True assert client._should_retry((None, 500,", "stripe.http_client.new_default_http_client() client._max_network_retries = lambda: 1 api_connection_error = mocker.Mock() api_connection_error.should_retry =", "(abs_url, data) data = None headers = {\"my-header\": \"header val\"}", "assert body == '{\"foo\": \"baz\"}' check_call(request_mock, method, abs_url, data, headers)", ") assert expected == actual @contextmanager def mock_max_delay(self, new_value): original_value", "mock_response(mock, body, code): raise NotImplementedError( \"You must implement this in", "stripe.http_client.HTTPClient.INITIAL_DELAY * random_value with self.mock_max_delay(10): expected = [ stripe.http_client.HTTPClient.INITIAL_DELAY, base_value", "2, (None, 409, {\"retry-after\": \"300\"}) ) def test_randomness_added(self): client =", "post_data): client = self.REQUEST_CLIENT(verify_ssl_certs=True) return client.request_with_retries(method, url, headers, post_data) async", "* 4, base_value * 8, base_value * 16, ] self.assert_sleep_times(client,", "self, mocker, request_mock, mock_response, check_call ): for method in VALID_API_METHODS:", "{}, None) class TestTornadoAsyncHTTPClient: # :TODO: Write tests for tornado", "stripe.http_client.new_default_http_client() client._add_jitter_time = lambda t: t # Prefer retry-after if", "client._should_retry((None, code, None), None, 0) is False # These status", "0) is True def test_should_retry_on_stripe_should_retry_false(self, mocker): client = stripe.http_client.new_default_http_client() client._max_network_retries", "def test_encode_array(self): body = {\"foo\": [{\"dob\": {\"month\": 1}, \"name\": \"bat\"}]}", "expected) def test_retry_after_header(self): client = stripe.http_client.new_default_http_client() client._add_jitter_time = lambda t:", "client = TestClient() response_future = asyncio.Future() response_future.set_result([\"\", 200, {\"Request-Id\": \"req_123\"}])", "test_exception(self, request_mock, mock_error): mock_error(request_mock) with pytest.raises(stripe.error.APIConnectionError): self.make_request(\"get\", self.valid_url, {}, None)", "0) is False def test_should_retry_on_stripe_should_retry_true(self, mocker): client = stripe.http_client.new_default_http_client() client._max_network_retries", "\"name\": \"bat\"}]} values = [t for t in 
stripe.api_requestor._api_encode(body)] assert", "mocker.resetall() def test_exception(self, request_mock, mock_error): mock_error(request_mock) with pytest.raises(stripe.error.APIConnectionError): self.make_request(\"get\", self.valid_url,", "map(lambda i: client._sleep_time_seconds(i + 1), range(until)) ) assert expected ==", "8.0]) def test_initial_delay_as_minimum(self): client = stripe.http_client.new_default_http_client() client._add_jitter_time = lambda t:", "{\"dob\": {\"month\": 1}, \"name\": \"bat\"}} values = [t for t", "\"delete\") class StripeClientTestCase(object): REQUEST_LIBRARIES = [\"AsyncHTTPClient\"] @pytest.fixture def request_mocks(self, mocker):", "is True def test_should_retry_on_error(self, mocker): client = stripe.http_client.new_default_http_client() client._max_network_retries =", "+ 1), range(until)) ) assert expected == actual @contextmanager def", "for val in jittered_ones) class TestRetryConditionsDefaultHttpClient(StripeClientTestCase): def test_should_retry_on_codes(self): one_xx =", "headers = {\"my-header\": \"header val\"} print(dir(self)) print(\"make_request_stream\" in dir(self)) stream,", "client._add_jitter_time = lambda t: t with self.mock_max_delay(10): self.assert_sleep_times(client, [0.5, 1.0,", "request_mock, mock_response, check_call ): for method in VALID_API_METHODS: mock_response(request_mock, \"some", "test_maximum_delay(self): client = stripe.http_client.new_default_http_client() client._add_jitter_time = lambda t: t max_delay", "t: t with self.mock_max_delay(10): self.assert_sleep_times(client, [0.5, 1.0, 2.0, 4.0, 8.0])", "header as true, we would. assert client._should_retry((None, 400, {}), None,", "True def test_should_retry_on_stripe_should_retry_false(self, mocker): client = stripe.http_client.new_default_http_client() client._max_network_retries = lambda:", "in stripe.api_requestor._api_encode(body)] assert (\"foo[dob][month]\", 1) in values assert (\"foo[name]\", \"bat\")", "request_mocks(self, mocker): request_mocks = {} for lib in self.REQUEST_LIBRARIES: request_mocks[lib]", "class StripeClientTestCase(object): REQUEST_LIBRARIES = [\"AsyncHTTPClient\"] @pytest.fixture def request_mocks(self, mocker): request_mocks", "client._add_jitter_time = lambda t: t * random_value base_value = stripe.http_client.HTTPClient.INITIAL_DELAY", "list(range(200, 209)) three_xx = list(range(300, 308)) four_xx = list(range(400, 431))", "These status codes should not be retried by default. 
for", "api_connection_error, max_test_retries + 1 ) is False ) assert (", "stripe.http_client.HTTPClient.MAX_DELAY = new_value try: yield self finally: stripe.http_client.HTTPClient.MAX_DELAY = original_value", "= stripe.http_client.HTTPClient.INITIAL_DELAY * random_value with self.mock_max_delay(10): expected = [ stripe.http_client.HTTPClient.INITIAL_DELAY,", "with self.mock_max_delay(10): expected = [ stripe.http_client.HTTPClient.INITIAL_DELAY, base_value * 2, base_value", "{\"Request-Id\": \"req_123\"}]) client.request = mocker.MagicMock( return_value=response_future ) _, code, _", "500, headers), None, 0) is False def test_should_retry_on_num_retries(self, mocker): client", "code, _ = self.make_request(method, abs_url, headers, data) assert code ==", "for t in stripe.api_requestor._api_encode(body)] assert (\"foo[0][dob][month]\", 1) in values assert", "def mock_error(mock, error): raise NotImplementedError( \"You must implement this in", ") assert code == 200 # Here we need to", "@pytest.fixture(autouse=True) def setup_stripe(self): orig_attrs = {\"enable_telemetry\": stripe.enable_telemetry} stripe.enable_telemetry = False", "self.mock_max_delay(10): self.assert_sleep_times(client, [0.5, 1.0, 2.0, 4.0, 8.0]) def test_initial_delay_as_minimum(self): client", "list(range(400, 431)) client = stripe.http_client.new_default_http_client() client._max_network_retries = lambda: 1 codes", "three_xx = list(range(300, 308)) four_xx = list(range(400, 431)) client =", "None, 0) is False # These status codes should be", "\"some streamed content\" mocker.resetall() def test_exception(self, request_mock, mock_error): mock_error(request_mock) with", "\"300\"}) ) def test_randomness_added(self): client = stripe.http_client.new_default_http_client() random_value = 0.8", "503, None), None, 0) is True def test_should_retry_on_error(self, mocker): client", "test_sleep_time_exponential_back_off(self): client = stripe.http_client.new_default_http_client() client._add_jitter_time = lambda t: t with", "with pytest.raises(stripe.error.APIConnectionError): self.make_request(\"get\", self.valid_url, {}, None) class TestTornadoAsyncHTTPClient: # :TODO:", "is False assert client._should_retry((None, 400, headers), None, 0) is True", "subclass\" ) return mock_error @pytest.fixture def check_call(self): def check_call( mock,", "): raise NotImplementedError( \"You must implement this in your test", "None) assert code == 200 args, _ = client.request.call_args assert", "original_filters = stripe.http_client.warnings.filters[:] stripe.http_client.warnings.simplefilter(\"ignore\") yield stripe.http_client.warnings.filters = original_filters def check_default(self,", "= mocker.Mock() api_connection_error.should_retry = True assert client._should_retry(None, api_connection_error, 0) is", "streamed content\" mocker.resetall() def test_exception(self, request_mock, mock_error): mock_error(request_mock) with pytest.raises(stripe.error.APIConnectionError):", "= list(range(100, 104)) two_xx = list(range(200, 209)) three_xx = list(range(300,", "but with the header as false, we would not. 
assert", "= TestClient() response_future = asyncio.Future() response_future.set_result([\"\", 200, {\"Request-Id\": \"req_123\"}]) client.request", "{} for lib in self.REQUEST_LIBRARIES: request_mocks[lib] = mocker.patch(\"async_stripe.http_client.%s\" % (lib,))", "some clients return a string stream others a byte stream.", "pass stripe.enable_telemetry = True url = \"http://fake.url\" client = TestClient()", "= lambda: 1 headers = {\"stripe-should-retry\": \"false\"} # Ordinarily, we", "1) in values assert (\"foo[name]\", \"bat\") in values def test_encode_array(self):", "request_mocks[lib] = mocker.patch(\"async_stripe.http_client.%s\" % (lib,)) return request_mocks class TestNewDefaultHttpClient(StripeClientTestCase): @pytest.fixture(autouse=True)", "max_delay = stripe.http_client.HTTPClient.MAX_DELAY expected = [0.5, 1.0, max_delay, max_delay, max_delay]", "stripe.http_client.HTTPClient.MAX_DELAY = original_value def test_sleep_time_exponential_back_off(self): client = stripe.http_client.new_default_http_client() client._add_jitter_time =", "\"bat\"}} values = [t for t in stripe.api_requestor._api_encode(body)] assert (\"foo[dob][month]\",", "in args[2] telemetry = json.loads(args[2][\"X-Stripe-Client-Telemetry\"]) assert telemetry[\"last_request_metrics\"][\"request_id\"] == \"req_123\" class", "six, util from async_stripe.http_client import TornadoAsyncHTTPClient pytestmark = pytest.mark.asyncio VALID_API_METHODS", "check_call def test_request(self, request_mock, mock_response, check_call): mock_response(request_mock, '{\"foo\": \"baz\"}', 200)", "abs_url = \"%s?%s\" % (abs_url, data) data = None headers", "jittered_ones) class TestRetryConditionsDefaultHttpClient(StripeClientTestCase): def test_should_retry_on_codes(self): one_xx = list(range(100, 104)) two_xx", "headers, params, is_streaming=False ): raise NotImplementedError( \"You must implement this", "TestAPIEncode(StripeClientTestCase): def test_encode_dict(self): body = {\"foo\": {\"dob\": {\"month\": 1}, \"name\":", "method, abs_url, data, headers) def test_request_stream( self, mocker, request_mock, mock_response,", "0) is True assert client._should_retry((None, 500, None), None, 0) is", "codes: assert client._should_retry((None, code, None), None, 0) is False #", "with the header as false, we would not. assert client._should_retry((None,", "TestRetryConditionsDefaultHttpClient(StripeClientTestCase): def test_should_retry_on_codes(self): one_xx = list(range(100, 104)) two_xx = list(range(200,", "2 == client._sleep_time_seconds( 3, (None, 409, {\"retry-after\": \"1\"}) ) #", "post_data): client = self.REQUEST_CLIENT(verify_ssl_certs=True) return await client.request_stream_with_retries( method, url, headers,", "lib in self.REQUEST_LIBRARIES: request_mocks[lib] = mocker.patch(\"async_stripe.http_client.%s\" % (lib,)) return request_mocks", "409, {\"retry-after\": \"1\"}) ) # Ignore crazy-big values assert 1", "[ stripe.http_client.HTTPClient.INITIAL_DELAY, base_value * 2, base_value * 4, base_value *", "None, max_test_retries + 1) is False ) class TestHTTPClient(object): @pytest.fixture(autouse=True)", "expected) def test_jitter_has_randomness_but_within_range(self): client = stripe.http_client.new_default_http_client() jittered_ones = set( map(lambda", "retried by default. 
assert client._should_retry((None, 409, None), None, 0) is", "post_data) async def make_request_stream(self, method, url, headers, post_data): client =", "base_value * 16, ] self.assert_sleep_times(client, expected) def test_jitter_has_randomness_but_within_range(self): client =", "None), None, 0) is True assert client._should_retry((None, 503, None), None,", "def assert_sleep_times(self, client, expected): until = len(expected) actual = list(", "client = self.REQUEST_CLIENT(verify_ssl_certs=True) return await client.request_stream_with_retries( method, url, headers, post_data", "+ two_xx + three_xx + four_xx codes.remove(409) # These status", "None, 0) is True assert client._should_retry((None, 500, headers), None, 0)", "api_connection_error = mocker.Mock() api_connection_error.should_retry = True assert client._should_retry(None, api_connection_error, 0)", "client.request_stream_with_retries( method, url, headers, post_data ) @pytest.fixture def mock_response(self): def", "= 10 client._max_network_retries = lambda: max_test_retries api_connection_error = mocker.Mock() api_connection_error.should_retry", "lambda t: t max_delay = stripe.http_client.HTTPClient.MAX_DELAY expected = [0.5, 1.0,", "assert isinstance(inst, expected) def test_new_default_http_client_tornado(self): self.check_default((), TornadoAsyncHTTPClient) class TestRetrySleepTimeDefaultHttpClient(StripeClientTestCase): from", "should not be retried by default. for code in codes:", "async def make_request_stream(self, method, url, headers, post_data): client = self.REQUEST_CLIENT(verify_ssl_certs=True)", "lambda: 1 headers = {\"stripe-should-retry\": \"true\"} # Ordinarily, we would", "for method in VALID_API_METHODS: abs_url = self.valid_url data = \"\"", "= stripe.http_client.new_default_http_client() random_value = 0.8 client._add_jitter_time = lambda t: t", "class TestHTTPClient(object): @pytest.fixture(autouse=True) def setup_stripe(self): orig_attrs = {\"enable_telemetry\": stripe.enable_telemetry} stripe.enable_telemetry", "def test_request(self, request_mock, mock_response, check_call): mock_response(request_mock, '{\"foo\": \"baz\"}', 200) for", "): for method in VALID_API_METHODS: mock_response(request_mock, \"some streamed content\", 200)", "stream. body_content = stream.read() if hasattr(body_content, \"decode\"): body_content = body_content.decode(\"utf-8\")", "stripe.http_client.warnings.simplefilter(\"ignore\") yield stripe.http_client.warnings.filters = original_filters def check_default(self, none_libs, expected): for", "{\"retry-after\": \"30\"}) ) # Prefer default if it's bigger assert", "client pass class TestAPIEncode(StripeClientTestCase): def test_encode_dict(self): body = {\"foo\": {\"dob\":", "mock_error(self): def mock_error(mock, error): raise NotImplementedError( \"You must implement this", "None headers = {\"my-header\": \"header val\"} print(dir(self)) print(\"make_request_stream\" in dir(self))", "self.REQUEST_CLIENT(verify_ssl_certs=True) return await client.request_stream_with_retries( method, url, headers, post_data ) @pytest.fixture", "the header as true, we would. 
assert client._should_retry((None, 400, {}),", "this in your test subclass\" ) return mock_error @pytest.fixture def", "data) data = None headers = {\"my-header\": \"header val\"} print(dir(self))", "+ 1) is False ) class TestHTTPClient(object): @pytest.fixture(autouse=True) def setup_stripe(self):", "method != \"post\": abs_url = \"%s?%s\" % (abs_url, data) data", "test_should_retry_on_num_retries(self, mocker): client = stripe.http_client.new_default_http_client() max_test_retries = 10 client._max_network_retries =", "def make_request_stream(self, method, url, headers, post_data): client = self.REQUEST_CLIENT(verify_ssl_certs=True) return", "your test subclass\" ) return mock_error @pytest.fixture def check_call(self): def", "None), None, 0) is True assert client._should_retry((None, 500, None), None,", "= lambda t: t * 0.001 initial_delay = stripe.http_client.HTTPClient.INITIAL_DELAY self.assert_sleep_times(client,", "\"baz\"}' check_call(request_mock, method, abs_url, data, headers) def test_request_stream( self, mocker,", "Ordinarily, we would not retry a 400, but with the", "def setup_stripe(self): orig_attrs = {\"enable_telemetry\": stripe.enable_telemetry} stripe.enable_telemetry = False yield", "(path,) def make_request(self, method, url, headers, post_data): client = self.REQUEST_CLIENT(verify_ssl_certs=True)", "client._should_retry((None, 500, None), None, 0) is True assert client._should_retry((None, 503,", "\"%s?%s\" % (abs_url, data) data = None headers = {\"my-header\":", "is True assert client._should_retry((None, 500, headers), None, 0) is False", "stripe.http_client.HTTPClient.INITIAL_DELAY, base_value * 2, base_value * 4, base_value * 8,", "@pytest.fixture def request_mock(self, request_mocks): return request_mocks[self.REQUEST_CLIENT.name] @property def valid_url(self, path=\"/foo\"):", "assert client._should_retry((None, 503, None), None, 0) is True def test_should_retry_on_error(self,", "headers, data ) assert code == 200 # Here we", "assert code == 200 assert body == '{\"foo\": \"baz\"}' check_call(request_mock,", "= False yield stripe.enable_telemetry = orig_attrs[\"enable_telemetry\"] async def test_sends_telemetry_on_second_request(self, mocker):", "data) data = None headers = {\"my-header\": \"header val\"} body,", "{\"retry-after\": \"300\"}) ) def test_randomness_added(self): client = stripe.http_client.new_default_http_client() random_value =", "= stripe.http_client.new_default_http_client() client._add_jitter_time = lambda t: t # Prefer retry-after", "in VALID_API_METHODS: mock_response(request_mock, \"some streamed content\", 200) abs_url = self.valid_url", "= {\"stripe-should-retry\": \"true\"} # Ordinarily, we would not retry a", "a 500, but with the header as false, we would", "= client.request.call_args assert \"X-Stripe-Client-Telemetry\" in args[2] telemetry = json.loads(args[2][\"X-Stripe-Client-Telemetry\"]) assert", "retried by default. 
for code in codes: assert client._should_retry((None, code,", "Ordinarily, we would retry a 500, but with the header", "1 ) is False ) assert ( client._should_retry((None, 409, None),", "# as some clients return a string stream others a", "values def test_encode_array(self): body = {\"foo\": [{\"dob\": {\"month\": 1}, \"name\":", "if it's bigger assert 2 == client._sleep_time_seconds( 3, (None, 409,", "assert client._should_retry((None, 500, headers), None, 0) is False def test_should_retry_on_num_retries(self,", "client._should_retry((None, 400, headers), None, 0) is True def test_should_retry_on_stripe_should_retry_false(self, mocker):", "await client.request_with_retries(\"get\", url, {}, None) assert code == 200 client.request.assert_called_with(\"get\",", "return_value=response_future ) _, code, _ = await client.request_with_retries(\"get\", url, {},", "{}, None) assert code == 200 client.request.assert_called_with(\"get\", url, {}, None)", "body = {\"foo\": [{\"dob\": {\"month\": 1}, \"name\": \"bat\"}]} values =", "stripe import urllib3 from stripe import six, util from async_stripe.http_client", ") is False ) assert ( client._should_retry((None, 409, None), None,", "200 # Here we need to convert and align all", "( client._should_retry( None, api_connection_error, max_test_retries + 1 ) is False", "your test subclass\" ) return mock_response @pytest.fixture def mock_error(self): def", "= {} for lib in self.REQUEST_LIBRARIES: request_mocks[lib] = mocker.patch(\"async_stripe.http_client.%s\" %", "base_value = stripe.http_client.HTTPClient.INITIAL_DELAY * random_value with self.mock_max_delay(10): expected = [", "= lambda t: t * random_value base_value = stripe.http_client.HTTPClient.INITIAL_DELAY *", "= {\"my-header\": \"header val\"} print(dir(self)) print(\"make_request_stream\" in dir(self)) stream, code,", "None headers = {\"my-header\": \"header val\"} body, code, _ =", "= stripe.http_client.HTTPClient.INITIAL_DELAY self.assert_sleep_times(client, [initial_delay] * 5) def test_maximum_delay(self): client =", "stripe.http_client.warnings.filters[:] stripe.http_client.warnings.simplefilter(\"ignore\") yield stripe.http_client.warnings.filters = original_filters def check_default(self, none_libs, expected):", "status codes should be retried by default. 
from __future__ import absolute_import, division, print_function

import pytest
import json
import asyncio

import stripe
import urllib3
from stripe import six, util

from async_stripe.http_client import TornadoAsyncHTTPClient

pytestmark = pytest.mark.asyncio

VALID_API_METHODS = ("get", "post", "delete")


class StripeClientTestCase(object):
    REQUEST_LIBRARIES = ["AsyncHTTPClient"]

    @pytest.fixture
    def request_mocks(self, mocker):
        request_mocks = {}
        for lib in self.REQUEST_LIBRARIES:
            request_mocks[lib] = mocker.patch(
                "async_stripe.http_client.%s" % (lib,)
            )
        return request_mocks


class TestNewDefaultHttpClient(StripeClientTestCase):
    @pytest.fixture(autouse=True)
    def setup_warnings(self, request_mocks):
        original_filters = stripe.http_client.warnings.filters[:]
        stripe.http_client.warnings.simplefilter("ignore")
        yield
        stripe.http_client.warnings.filters = original_filters

    def check_default(self, none_libs, expected):
        for lib in none_libs:
            setattr(stripe.http_client, lib, None)
        inst = stripe.http_client.new_default_http_client()
        assert isinstance(inst, expected)

    def test_new_default_http_client_tornado(self):
        self.check_default((), TornadoAsyncHTTPClient)
class TestRetrySleepTimeDefaultHttpClient(StripeClientTestCase):
    from contextlib import contextmanager

    def assert_sleep_times(self, client, expected):
        until = len(expected)
        actual = list(
            map(lambda i: client._sleep_time_seconds(i + 1), range(until))
        )
        assert expected == actual

    @contextmanager
    def mock_max_delay(self, new_value):
        original_value = stripe.http_client.HTTPClient.MAX_DELAY
        stripe.http_client.HTTPClient.MAX_DELAY = new_value
        try:
            yield self
        finally:
            stripe.http_client.HTTPClient.MAX_DELAY = original_value

    def test_sleep_time_exponential_back_off(self):
        client = stripe.http_client.new_default_http_client()
        client._add_jitter_time = lambda t: t
        with self.mock_max_delay(10):
            self.assert_sleep_times(client, [0.5, 1.0, 2.0, 4.0, 8.0])

    def test_initial_delay_as_minimum(self):
        client = stripe.http_client.new_default_http_client()
        client._add_jitter_time = lambda t: t * 0.001
        initial_delay = stripe.http_client.HTTPClient.INITIAL_DELAY
        self.assert_sleep_times(client, [initial_delay] * 5)

    def test_maximum_delay(self):
        client = stripe.http_client.new_default_http_client()
        client._add_jitter_time = lambda t: t
        max_delay = stripe.http_client.HTTPClient.MAX_DELAY
        expected = [0.5, 1.0, max_delay, max_delay, max_delay]
        self.assert_sleep_times(client, expected)

    def test_retry_after_header(self):
        client = stripe.http_client.new_default_http_client()
        client._add_jitter_time = lambda t: t

        # Prefer retry-after if it's bigger
        assert 30 == client._sleep_time_seconds(
            2, (None, 409, {"retry-after": "30"})
        )
        # Prefer default if it's bigger
        assert 2 == client._sleep_time_seconds(
            3, (None, 409, {"retry-after": "1"})
        )
        # Ignore crazy-big values
        assert 1 == client._sleep_time_seconds(
            2, (None, 409, {"retry-after": "300"})
        )

    def test_randomness_added(self):
        client = stripe.http_client.new_default_http_client()
        random_value = 0.8
        client._add_jitter_time = lambda t: t * random_value
        base_value = stripe.http_client.HTTPClient.INITIAL_DELAY * random_value
        with self.mock_max_delay(10):
            expected = [
                stripe.http_client.HTTPClient.INITIAL_DELAY,
                base_value * 2,
                base_value * 4,
                base_value * 8,
                base_value * 16,
            ]
            self.assert_sleep_times(client, expected)

    def test_jitter_has_randomness_but_within_range(self):
        client = stripe.http_client.new_default_http_client()
        jittered_ones = set(
            map(lambda _: client._add_jitter_time(1), list(range(100)))
        )
        assert len(jittered_ones) > 1
        assert all(0.5 <= val <= 1 for val in jittered_ones)
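# As a compact reference for the tests above, a hypothetical model of the
# delay schedule they pin down. This is a sketch, not the library's
# implementation: the 60s ceiling on Retry-After is an assumption, and
# jitter is omitted because the tests stub out _add_jitter_time.
def _sketch_sleep_time(num_retries, retry_after=None, initial=0.5,
                       maximum=8.0, max_retry_after=60):
    # Exponential growth from `initial`, capped at `maximum`.
    delay = min(initial * (2 ** (num_retries - 1)), maximum)
    # Honor a larger Retry-After, but ignore implausibly large values.
    if retry_after is not None and delay < retry_after <= max_retry_after:
        delay = retry_after
    return delay
# e.g. _sketch_sleep_time(2, retry_after=30)  -> 30   (header wins)
#      _sketch_sleep_time(3, retry_after=1)   -> 2.0  (default wins)
#      _sketch_sleep_time(2, retry_after=300) -> 1.0  (crazy value ignored)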
class TestRetryConditionsDefaultHttpClient(StripeClientTestCase):
    def test_should_retry_on_codes(self):
        one_xx = list(range(100, 104))
        two_xx = list(range(200, 209))
        three_xx = list(range(300, 308))
        four_xx = list(range(400, 431))

        client = stripe.http_client.new_default_http_client()
        client._max_network_retries = lambda: 1
        codes = one_xx + two_xx + three_xx + four_xx
        codes.remove(409)

        # These status codes should not be retried by default.
        for code in codes:
            assert client._should_retry((None, code, None), None, 0) is False

        # These status codes should be retried by default.
        assert client._should_retry((None, 409, None), None, 0) is True
        assert client._should_retry((None, 500, None), None, 0) is True
        assert client._should_retry((None, 503, None), None, 0) is True

    def test_should_retry_on_error(self, mocker):
        client = stripe.http_client.new_default_http_client()
        client._max_network_retries = lambda: 1
        api_connection_error = mocker.Mock()
        api_connection_error.should_retry = True
        assert client._should_retry(None, api_connection_error, 0) is True
        api_connection_error.should_retry = False
        assert client._should_retry(None, api_connection_error, 0) is False

    def test_should_retry_on_stripe_should_retry_true(self, mocker):
        client = stripe.http_client.new_default_http_client()
        client._max_network_retries = lambda: 1
        headers = {"stripe-should-retry": "true"}

        # Ordinarily, we would not retry a 400, but with the header as
        # true, we would.
        assert client._should_retry((None, 400, {}), None, 0) is False
        assert client._should_retry((None, 400, headers), None, 0) is True

    def test_should_retry_on_stripe_should_retry_false(self, mocker):
        client = stripe.http_client.new_default_http_client()
        client._max_network_retries = lambda: 1
        headers = {"stripe-should-retry": "false"}

        # Ordinarily, we would retry a 500, but with the header as
        # false, we would not.
        assert client._should_retry((None, 500, {}), None, 0) is True
        assert client._should_retry((None, 500, headers), None, 0) is False

    def test_should_retry_on_num_retries(self, mocker):
        client = stripe.http_client.new_default_http_client()
        max_test_retries = 10
        client._max_network_retries = lambda: max_test_retries

        api_connection_error = mocker.Mock()
        api_connection_error.should_retry = True

        assert (
            client._should_retry(
                None, api_connection_error, max_test_retries + 1
            )
            is False
        )
        assert (
            client._should_retry(
                (None, 409, None), None, max_test_retries + 1
            )
            is False
        )
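# A hypothetical one-screen summary of the retry policy the class above
# exercises. It models the tests' expectations, not the library's actual
# control flow: the retry budget is checked first, an explicit
# stripe-should-retry header overrides everything else, then error hints
# and status codes are consulted.
def _sketch_should_retry(status, headers, error_should_retry,
                         num_retries, max_retries):
    if num_retries >= max_retries:
        return False
    hint = (headers or {}).get("stripe-should-retry")
    if hint == "true":
        return True
    if hint == "false":
        return False
    if error_should_retry:
        return True
    # 409 Conflict and 5xx availability errors are retried by default.
    return status in (409, 500, 503)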
class TestHTTPClient(object):
    @pytest.fixture(autouse=True)
    def setup_stripe(self):
        orig_attrs = {"enable_telemetry": stripe.enable_telemetry}
        stripe.enable_telemetry = False
        yield
        stripe.enable_telemetry = orig_attrs["enable_telemetry"]

    async def test_sends_telemetry_on_second_request(self, mocker):
        class TestClient(stripe.http_client.HTTPClient):
            pass

        stripe.enable_telemetry = True

        url = "http://fake.url"

        client = TestClient()

        response_future = asyncio.Future()
        response_future.set_result(["", 200, {"Request-Id": "req_123"}])
        client.request = mocker.MagicMock(return_value=response_future)
        _, code, _ = await client.request_with_retries("get", url, {}, None)
        assert code == 200
        client.request.assert_called_with("get", url, {}, None)

        response_future = asyncio.Future()
        response_future.set_result(["", 200, {"Request-Id": "req_234"}])
        client.request = mocker.MagicMock(return_value=response_future)
        _, code, _ = await client.request_with_retries("get", url, {}, None)
        assert code == 200

        args, _ = client.request.call_args
        assert "X-Stripe-Client-Telemetry" in args[2]

        telemetry = json.loads(args[2]["X-Stripe-Client-Telemetry"])
        assert telemetry["last_request_metrics"]["request_id"] == "req_123"
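# For reference, the header asserted above decodes to JSON shaped like
# {"last_request_metrics": {"request_id": "req_123", ...}}; only the
# request_id key is guaranteed by this test, any further fields are an
# assumption about the payload.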
class ClientTestBase(object):
    @pytest.fixture
    def request_mock(self, request_mocks):
        return request_mocks[self.REQUEST_CLIENT.name]

    @property
    def valid_url(self, path="/foo"):
        return "https://api.stripe.com%s" % (path,)

    def make_request(self, method, url, headers, post_data):
        client = self.REQUEST_CLIENT(verify_ssl_certs=True)
        return client.request_with_retries(method, url, headers, post_data)

    async def make_request_stream(self, method, url, headers, post_data):
        client = self.REQUEST_CLIENT(verify_ssl_certs=True)
        return await client.request_stream_with_retries(
            method, url, headers, post_data
        )

    @pytest.fixture
    def mock_response(self):
        def mock_response(mock, body, code):
            raise NotImplementedError(
                "You must implement this in your test subclass"
            )

        return mock_response

    @pytest.fixture
    def mock_error(self):
        def mock_error(mock, error):
            raise NotImplementedError(
                "You must implement this in your test subclass"
            )

        return mock_error

    @pytest.fixture
    def check_call(self):
        def check_call(
            mock, method, abs_url, headers, params, is_streaming=False
        ):
            raise NotImplementedError(
                "You must implement this in your test subclass"
            )

        return check_call

    async def test_request(self, request_mock, mock_response, check_call):
        mock_response(request_mock, '{"foo": "baz"}', 200)

        for method in VALID_API_METHODS:
            abs_url = self.valid_url
            data = ""
            if method != "post":
                abs_url = "%s?%s" % (abs_url, data)
                data = None

            headers = {"my-header": "header val"}
            body, code, _ = await self.make_request(
                method, abs_url, headers, data
            )
            assert code == 200
            assert body == '{"foo": "baz"}'
            check_call(request_mock, method, abs_url, data, headers)

    async def test_request_stream(
        self, mocker, request_mock, mock_response, check_call
    ):
        for method in VALID_API_METHODS:
            mock_response(request_mock, "some streamed content", 200)

            abs_url = self.valid_url
            data = ""
            if method != "post":
                abs_url = "%s?%s" % (abs_url, data)
                data = None

            headers = {"my-header": "header val"}
            stream, code, _ = await self.make_request_stream(
                method, abs_url, headers, data
            )
            assert code == 200

            # Here we need to convert and align all content on one type
            # (string), as some clients return a string stream and others
            # a byte stream.
            body_content = stream.read()
            if hasattr(body_content, "decode"):
                body_content = body_content.decode("utf-8")

            assert body_content == "some streamed content"

            mocker.resetall()

    async def test_exception(self, request_mock, mock_error):
        mock_error(request_mock)
        with pytest.raises(stripe.error.APIConnectionError):
            await self.make_request("get", self.valid_url, {}, None)


class TestTornadoAsyncHTTPClient:
    # TODO: write tests for the tornado client.
    pass
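# Until the TODO above is resolved, a minimal sketch of what a tornado
# client test could look like, reusing the future-based mocking pattern
# from TestHTTPClient.test_sends_telemetry_on_second_request. The
# ["body", status, headers] result shape matches what request_with_retries
# unpacks elsewhere in this module; treating TornadoAsyncHTTPClient.request
# as replaceable is an assumption, so the function is underscore-prefixed
# to keep pytest from collecting it until verified.
async def _sketch_tornado_request(mocker):
    client = TornadoAsyncHTTPClient(verify_ssl_certs=True)
    response_future = asyncio.Future()
    response_future.set_result(['{"foo": "baz"}', 200, {}])
    client.request = mocker.MagicMock(return_value=response_future)

    body, code, _ = await client.request_with_retries(
        "get", "https://api.stripe.com/foo", {}, None
    )
    assert code == 200
    assert body == '{"foo": "baz"}'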
stripe.http_client.HTTPClient.MAX_DELAY = original_value def test_sleep_time_exponential_back_off(self): client = stripe.http_client.new_default_http_client()", "response_future.set_result([\"\", 200, {\"Request-Id\": \"req_234\"}]) client.request = mocker.MagicMock( return_value=response_future ) _,", "= mocker.MagicMock( return_value=response_future ) _, code, _ = await client.request_with_retries(\"get\",", "test_encode_array(self): body = {\"foo\": [{\"dob\": {\"month\": 1}, \"name\": \"bat\"}]} values", "headers, post_data): client = self.REQUEST_CLIENT(verify_ssl_certs=True) return await client.request_stream_with_retries( method, url,", "yield stripe.http_client.warnings.filters = original_filters def check_default(self, none_libs, expected): for lib", "bigger assert 30 == client._sleep_time_seconds( 2, (None, 409, {\"retry-after\": \"30\"})", "client._should_retry(None, api_connection_error, 0) is True api_connection_error.should_retry = False assert client._should_retry(None,", "url, headers, post_data ) @pytest.fixture def mock_response(self): def mock_response(mock, body,", "original_value def test_sleep_time_exponential_back_off(self): client = stripe.http_client.new_default_http_client() client._add_jitter_time = lambda t:", "self.check_default((), TornadoAsyncHTTPClient) class TestRetrySleepTimeDefaultHttpClient(StripeClientTestCase): from contextlib import contextmanager def assert_sleep_times(self,", "stripe.enable_telemetry = orig_attrs[\"enable_telemetry\"] async def test_sends_telemetry_on_second_request(self, mocker): class TestClient(stripe.http_client.HTTPClient): pass", "abs_url, headers, data ) assert code == 200 # Here", "\"name\": \"bat\"}} values = [t for t in stripe.api_requestor._api_encode(body)] assert", "i: client._sleep_time_seconds(i + 1), range(until)) ) assert expected == actual", "client._sleep_time_seconds( 3, (None, 409, {\"retry-after\": \"1\"}) ) # Ignore crazy-big", "class TestRetryConditionsDefaultHttpClient(StripeClientTestCase): def test_should_retry_on_codes(self): one_xx = list(range(100, 104)) two_xx =", "client = stripe.http_client.new_default_http_client() client._max_network_retries = lambda: 1 codes = one_xx", "max_test_retries + 1) is False ) class TestHTTPClient(object): @pytest.fixture(autouse=True) def", "return mock_response @pytest.fixture def mock_error(self): def mock_error(mock, error): raise NotImplementedError(", "= {\"foo\": [{\"dob\": {\"month\": 1}, \"name\": \"bat\"}]} values = [t", "type (string) # as some clients return a string stream", "None), None, 0) is False # These status codes should", "class TestAPIEncode(StripeClientTestCase): def test_encode_dict(self): body = {\"foo\": {\"dob\": {\"month\": 1},", "urllib3 from stripe import six, util from async_stripe.http_client import TornadoAsyncHTTPClient", "500, None), None, 0) is True assert client._should_retry((None, 503, None),", "VALID_API_METHODS = (\"get\", \"post\", \"delete\") class StripeClientTestCase(object): REQUEST_LIBRARIES = [\"AsyncHTTPClient\"]", "\"req_123\" class ClientTestBase(object): @pytest.fixture def request_mock(self, request_mocks): return request_mocks[self.REQUEST_CLIENT.name] @property", "request_mock, mock_error): mock_error(request_mock) with pytest.raises(stripe.error.APIConnectionError): self.make_request(\"get\", self.valid_url, {}, None) class", "16, ] self.assert_sleep_times(client, expected) def test_jitter_has_randomness_but_within_range(self): client = stripe.http_client.new_default_http_client() jittered_ones", "method, url, 
headers, post_data): client = self.REQUEST_CLIENT(verify_ssl_certs=True) return client.request_with_retries(method, url,", "\"header val\"} print(dir(self)) print(\"make_request_stream\" in dir(self)) stream, code, _ =", "stripe.http_client.new_default_http_client() client._max_network_retries = lambda: 1 headers = {\"stripe-should-retry\": \"true\"} #", "_ = self.make_request_stream( method, abs_url, headers, data ) assert code", "lambda t: t with self.mock_max_delay(10): self.assert_sleep_times(client, [0.5, 1.0, 2.0, 4.0,", "client = stripe.http_client.new_default_http_client() client._add_jitter_time = lambda t: t * 0.001", "= lambda t: t # Prefer retry-after if it's bigger", "200 client.request.assert_called_with(\"get\", url, {}, None) response_future = asyncio.Future() response_future.set_result([\"\", 200,", "8, base_value * 16, ] self.assert_sleep_times(client, expected) def test_jitter_has_randomness_but_within_range(self): client", "body_content = body_content.decode(\"utf-8\") assert body_content == \"some streamed content\" mocker.resetall()", "# Prefer default if it's bigger assert 2 == client._sleep_time_seconds(", "= self.make_request_stream( method, abs_url, headers, data ) assert code ==", "headers), None, 0) is True def test_should_retry_on_stripe_should_retry_false(self, mocker): client =", "assert (\"foo[name]\", \"bat\") in values def test_encode_array(self): body = {\"foo\":", "one_xx + two_xx + three_xx + four_xx codes.remove(409) # These", "1) is False ) class TestHTTPClient(object): @pytest.fixture(autouse=True) def setup_stripe(self): orig_attrs", "abs_url, headers, data) assert code == 200 assert body ==", "client._should_retry((None, 503, None), None, 0) is True def test_should_retry_on_error(self, mocker):", "url, {}, None) assert code == 200 client.request.assert_called_with(\"get\", url, {},", "for lib in self.REQUEST_LIBRARIES: request_mocks[lib] = mocker.patch(\"async_stripe.http_client.%s\" % (lib,)) return", "client = self.REQUEST_CLIENT(verify_ssl_certs=True) return client.request_with_retries(method, url, headers, post_data) async def", "= None headers = {\"my-header\": \"header val\"} body, code, _", "\"X-Stripe-Client-Telemetry\" in args[2] telemetry = json.loads(args[2][\"X-Stripe-Client-Telemetry\"]) assert telemetry[\"last_request_metrics\"][\"request_id\"] == \"req_123\"", "pytest.mark.asyncio VALID_API_METHODS = (\"get\", \"post\", \"delete\") class StripeClientTestCase(object): REQUEST_LIBRARIES =", "500, but with the header as false, we would not.", "NotImplementedError( \"You must implement this in your test subclass\" )", "codes = one_xx + two_xx + three_xx + four_xx codes.remove(409)", "{\"Request-Id\": \"req_234\"}]) client.request = mocker.MagicMock( return_value=response_future ) _, code, _", "* random_value base_value = stripe.http_client.HTTPClient.INITIAL_DELAY * random_value with self.mock_max_delay(10): expected", "mocker): client = stripe.http_client.new_default_http_client() client._max_network_retries = lambda: 1 headers =", "request_mock, mock_response, check_call): mock_response(request_mock, '{\"foo\": \"baz\"}', 200) for method in", "None), None, max_test_retries + 1) is False ) class TestHTTPClient(object):", "api_connection_error, 0) is False def test_should_retry_on_stripe_should_retry_true(self, mocker): client = stripe.http_client.new_default_http_client()", "= stripe.http_client.new_default_http_client() assert isinstance(inst, expected) def test_new_default_http_client_tornado(self): self.check_default((), 
TornadoAsyncHTTPClient) class", "data) assert code == 200 assert body == '{\"foo\": \"baz\"}'", "need to convert and align all content on one type", "in dir(self)) stream, code, _ = self.make_request_stream( method, abs_url, headers,", "api_connection_error.should_retry = True assert ( client._should_retry( None, api_connection_error, max_test_retries +", "for lib in none_libs: setattr(stripe.http_client, lib, None) inst = stripe.http_client.new_default_http_client()", "= False assert client._should_retry(None, api_connection_error, 0) is False def test_should_retry_on_stripe_should_retry_true(self,", "# Ignore crazy-big values assert 1 == client._sleep_time_seconds( 2, (None,", "len(expected) actual = list( map(lambda i: client._sleep_time_seconds(i + 1), range(until))", "'{\"foo\": \"baz\"}', 200) for method in VALID_API_METHODS: abs_url = self.valid_url", "test_jitter_has_randomness_but_within_range(self): client = stripe.http_client.new_default_http_client() jittered_ones = set( map(lambda _: client._add_jitter_time(1),", "expected): for lib in none_libs: setattr(stripe.http_client, lib, None) inst =", "client.request_with_retries(\"get\", url, {}, None) assert code == 200 args, _", "self.make_request(method, abs_url, headers, data) assert code == 200 assert body", ") _, code, _ = await client.request_with_retries(\"get\", url, {}, None)", "= [t for t in stripe.api_requestor._api_encode(body)] assert (\"foo[0][dob][month]\", 1) in", "assert 1 == client._sleep_time_seconds( 2, (None, 409, {\"retry-after\": \"300\"}) )", "409, None), None, 0) is True assert client._should_retry((None, 500, None),", "url, headers, post_data) async def make_request_stream(self, method, url, headers, post_data):", "import stripe import urllib3 from stripe import six, util from", "TestNewDefaultHttpClient(StripeClientTestCase): @pytest.fixture(autouse=True) def setup_warnings(self, request_mocks): original_filters = stripe.http_client.warnings.filters[:] stripe.http_client.warnings.simplefilter(\"ignore\") yield", "{}), None, 0) is False assert client._should_retry((None, 400, headers), None,", "none_libs: setattr(stripe.http_client, lib, None) inst = stripe.http_client.new_default_http_client() assert isinstance(inst, expected)", "in codes: assert client._should_retry((None, code, None), None, 0) is False", "TestClient(stripe.http_client.HTTPClient): pass stripe.enable_telemetry = True url = \"http://fake.url\" client =", "not be retried by default. 
for code in codes: assert", "list( map(lambda i: client._sleep_time_seconds(i + 1), range(until)) ) assert expected", "t * 0.001 initial_delay = stripe.http_client.HTTPClient.INITIAL_DELAY self.assert_sleep_times(client, [initial_delay] * 5)", "return \"https://api.stripe.com%s\" % (path,) def make_request(self, method, url, headers, post_data):", "None, 0) is True def test_should_retry_on_stripe_should_retry_false(self, mocker): client = stripe.http_client.new_default_http_client()", "mock_error(mock, error): raise NotImplementedError( \"You must implement this in your", "val\"} body, code, _ = self.make_request(method, abs_url, headers, data) assert", "headers, post_data) async def make_request_stream(self, method, url, headers, post_data): client", "409, None), None, max_test_retries + 1) is False ) class", "Ignore crazy-big values assert 1 == client._sleep_time_seconds( 2, (None, 409,", "= {\"enable_telemetry\": stripe.enable_telemetry} stripe.enable_telemetry = False yield stripe.enable_telemetry = orig_attrs[\"enable_telemetry\"]", "pytestmark = pytest.mark.asyncio VALID_API_METHODS = (\"get\", \"post\", \"delete\") class StripeClientTestCase(object):", "308)) four_xx = list(range(400, 431)) client = stripe.http_client.new_default_http_client() client._max_network_retries =", "t with self.mock_max_delay(10): self.assert_sleep_times(client, [0.5, 1.0, 2.0, 4.0, 8.0]) def", "def check_call(self): def check_call( mock, method, abs_url, headers, params, is_streaming=False", "[\"AsyncHTTPClient\"] @pytest.fixture def request_mocks(self, mocker): request_mocks = {} for lib", "stripe.http_client.new_default_http_client() assert isinstance(inst, expected) def test_new_default_http_client_tornado(self): self.check_default((), TornadoAsyncHTTPClient) class TestRetrySleepTimeDefaultHttpClient(StripeClientTestCase):", "body, code): raise NotImplementedError( \"You must implement this in your", "mocker): client = stripe.http_client.new_default_http_client() client._max_network_retries = lambda: 1 api_connection_error =", "new_value): original_value = stripe.http_client.HTTPClient.MAX_DELAY stripe.http_client.HTTPClient.MAX_DELAY = new_value try: yield self", "% (abs_url, data) data = None headers = {\"my-header\": \"header", "check_call): mock_response(request_mock, '{\"foo\": \"baz\"}', 200) for method in VALID_API_METHODS: abs_url", "= stripe.http_client.new_default_http_client() client._max_network_retries = lambda: 1 codes = one_xx +", "== 200 # Here we need to convert and align", "+ 1 ) is False ) assert ( client._should_retry((None, 409,", "from __future__ import absolute_import, division, print_function import pytest import json", "list(range(300, 308)) four_xx = list(range(400, 431)) client = stripe.http_client.new_default_http_client() client._max_network_retries", "1}, \"name\": \"bat\"}} values = [t for t in stripe.api_requestor._api_encode(body)]", "def test_should_retry_on_error(self, mocker): client = stripe.http_client.new_default_http_client() client._max_network_retries = lambda: 1", "this in your test subclass\" ) return mock_response @pytest.fixture def", "== \"some streamed content\" mocker.resetall() def test_exception(self, request_mock, mock_error): mock_error(request_mock)", ") @pytest.fixture def mock_response(self): def mock_response(mock, body, code): raise NotImplementedError(", "t: t * 0.001 initial_delay = stripe.http_client.HTTPClient.INITIAL_DELAY self.assert_sleep_times(client, [initial_delay] *", "30 == client._sleep_time_seconds( 2, (None, 409, {\"retry-after\": 
\"30\"}) ) #", "] self.assert_sleep_times(client, expected) def test_jitter_has_randomness_but_within_range(self): client = stripe.http_client.new_default_http_client() jittered_ones =", "= new_value try: yield self finally: stripe.http_client.HTTPClient.MAX_DELAY = original_value def", "= list( map(lambda i: client._sleep_time_seconds(i + 1), range(until)) ) assert", "Prefer retry-after if it's bigger assert 30 == client._sleep_time_seconds( 2,", "@pytest.fixture def mock_response(self): def mock_response(mock, body, code): raise NotImplementedError( \"You", "pass class TestAPIEncode(StripeClientTestCase): def test_encode_dict(self): body = {\"foo\": {\"dob\": {\"month\":", "client._max_network_retries = lambda: 1 headers = {\"stripe-should-retry\": \"true\"} # Ordinarily,", "headers = {\"stripe-should-retry\": \"false\"} # Ordinarily, we would retry a", "client._max_network_retries = lambda: max_test_retries api_connection_error = mocker.Mock() api_connection_error.should_retry = True", "be retried by default. for code in codes: assert client._should_retry((None,", "These status codes should be retried by default. assert client._should_retry((None,", "False def test_should_retry_on_stripe_should_retry_true(self, mocker): client = stripe.http_client.new_default_http_client() client._max_network_retries = lambda:", "we would retry a 500, but with the header as", "args[2] telemetry = json.loads(args[2][\"X-Stripe-Client-Telemetry\"]) assert telemetry[\"last_request_metrics\"][\"request_id\"] == \"req_123\" class ClientTestBase(object):", "stream.read() if hasattr(body_content, \"decode\"): body_content = body_content.decode(\"utf-8\") assert body_content ==", "tornado client pass class TestAPIEncode(StripeClientTestCase): def test_encode_dict(self): body = {\"foo\":", "assert client._should_retry(None, api_connection_error, 0) is False def test_should_retry_on_stripe_should_retry_true(self, mocker): client", "= stripe.http_client.HTTPClient.MAX_DELAY expected = [0.5, 1.0, max_delay, max_delay, max_delay] self.assert_sleep_times(client,", "path=\"/foo\"): return \"https://api.stripe.com%s\" % (path,) def make_request(self, method, url, headers,", "self.REQUEST_CLIENT(verify_ssl_certs=True) return client.request_with_retries(method, url, headers, post_data) async def make_request_stream(self, method,", "retry-after if it's bigger assert 30 == client._sleep_time_seconds( 2, (None,", "client = stripe.http_client.new_default_http_client() client._max_network_retries = lambda: 1 headers = {\"stripe-should-retry\":", "for code in codes: assert client._should_retry((None, code, None), None, 0)", "if method != \"post\": abs_url = \"%s?%s\" % (abs_url, data)", "request_mocks[self.REQUEST_CLIENT.name] @property def valid_url(self, path=\"/foo\"): return \"https://api.stripe.com%s\" % (path,) def", "_ = client.request.call_args assert \"X-Stripe-Client-Telemetry\" in args[2] telemetry = json.loads(args[2][\"X-Stripe-Client-Telemetry\"])", "it's bigger assert 2 == client._sleep_time_seconds( 3, (None, 409, {\"retry-after\":", "[t for t in stripe.api_requestor._api_encode(body)] assert (\"foo[0][dob][month]\", 1) in values", "= await client.request_with_retries(\"get\", url, {}, None) assert code == 200", "= json.loads(args[2][\"X-Stripe-Client-Telemetry\"]) assert telemetry[\"last_request_metrics\"][\"request_id\"] == \"req_123\" class ClientTestBase(object): @pytest.fixture def", "data, headers) def test_request_stream( self, mocker, request_mock, mock_response, check_call ):", "val\"} print(dir(self)) 
print(\"make_request_stream\" in dir(self)) stream, code, _ = self.make_request_stream(", "1), range(until)) ) assert expected == actual @contextmanager def mock_max_delay(self,", "json.loads(args[2][\"X-Stripe-Client-Telemetry\"]) assert telemetry[\"last_request_metrics\"][\"request_id\"] == \"req_123\" class ClientTestBase(object): @pytest.fixture def request_mock(self,", "test_new_default_http_client_tornado(self): self.check_default((), TornadoAsyncHTTPClient) class TestRetrySleepTimeDefaultHttpClient(StripeClientTestCase): from contextlib import contextmanager def", "assert body_content == \"some streamed content\" mocker.resetall() def test_exception(self, request_mock,", "2, (None, 409, {\"retry-after\": \"30\"}) ) # Prefer default if", "* 2, base_value * 4, base_value * 8, base_value *", "a byte stream. body_content = stream.read() if hasattr(body_content, \"decode\"): body_content", "= mocker.patch(\"async_stripe.http_client.%s\" % (lib,)) return request_mocks class TestNewDefaultHttpClient(StripeClientTestCase): @pytest.fixture(autouse=True) def", "would. assert client._should_retry((None, 400, {}), None, 0) is False assert", "1 assert all(0.5 <= val <= 1 for val in", "mocker.MagicMock( return_value=response_future ) _, code, _ = await client.request_with_retries(\"get\", url,", "mock_response @pytest.fixture def mock_error(self): def mock_error(mock, error): raise NotImplementedError( \"You", "map(lambda _: client._add_jitter_time(1), list(range(100))) ) assert len(jittered_ones) > 1 assert", "= lambda: 1 api_connection_error = mocker.Mock() api_connection_error.should_retry = True assert", "= stripe.http_client.new_default_http_client() client._max_network_retries = lambda: 1 headers = {\"stripe-should-retry\": \"true\"}", "mocker): class TestClient(stripe.http_client.HTTPClient): pass stripe.enable_telemetry = True url = \"http://fake.url\"", "1 == client._sleep_time_seconds( 2, (None, 409, {\"retry-after\": \"300\"}) ) def", "= {\"my-header\": \"header val\"} body, code, _ = self.make_request(method, abs_url,", "subclass\" ) return mock_response @pytest.fixture def mock_error(self): def mock_error(mock, error):", "def valid_url(self, path=\"/foo\"): return \"https://api.stripe.com%s\" % (path,) def make_request(self, method,", "len(jittered_ones) > 1 assert all(0.5 <= val <= 1 for", "in none_libs: setattr(stripe.http_client, lib, None) inst = stripe.http_client.new_default_http_client() assert isinstance(inst,", "@pytest.fixture def request_mocks(self, mocker): request_mocks = {} for lib in" ]
[ "Store a python dictionary generated from json data at <url>", "url: str): \"\"\" Store a python dictionary generated from json", "<url> in self.data. Returns self. \"\"\" data = subprocess.run( f\"curl", ").stdout self.data = json.loads(data) return self def make_instructions(self): \"\"\" Take", "list of instructions about its html visualization that is parsed", "<filename>http/static/jsonvis.py \"\"\"\\ Provides html file visualization of a json dataset", "elif isinstance(data, list): self._open_list() for item in data: self._iterate(item) self._horiz_rule()", "visualization that is parsed by json.html. \"\"\" self.instructions = []", "for item in data: self._iterate(item) self._horiz_rule() self._close_list() else: self._list_item(data) def", "json data at <url> in self.data. Returns self. \"\"\" data", "= subprocess.run( f\"curl '{url}'\", # Quotes required around url for", "key, value in data.items(): self._iterate(key) self._open_list() self._iterate(value) self._close_list() elif isinstance(data,", "= json.loads(data) return self def make_instructions(self): \"\"\" Take self.data and", "required around url for URL parameters stdout=subprocess.PIPE, shell=True ).stdout self.data", "self.instructions.append(('horiz_rule', None)) def _close_list(self): self.instructions.append(('close_list', None)) def _iterate(self, data: iter):", "self.instructions.append(('list_item', str(data))) def _horiz_rule(self): self.instructions.append(('horiz_rule', None)) def _close_list(self): self.instructions.append(('close_list', None))", "self._horiz_rule() self._close_list() else: self._list_item(data) def download(self, url: str): \"\"\" Store", "\"\"\" Store a python dictionary generated from json data at", "data = subprocess.run( f\"curl '{url}'\", # Quotes required around url", "html file visualization of a json dataset \"\"\" import json", "URL parameters stdout=subprocess.PIPE, shell=True ).stdout self.data = json.loads(data) return self", "None)) def _list_item(self, data): self.instructions.append(('list_item', str(data))) def _horiz_rule(self): self.instructions.append(('horiz_rule', None))", "# Quotes required around url for URL parameters stdout=subprocess.PIPE, shell=True", "str(data))) def _horiz_rule(self): self.instructions.append(('horiz_rule', None)) def _close_list(self): self.instructions.append(('close_list', None)) def", "else: self._list_item(data) def download(self, url: str): \"\"\" Store a python", "Returns self. \"\"\" data = subprocess.run( f\"curl '{url}'\", # Quotes", "JsonVis: def _open_list(self): self.instructions.append(('open_list', None)) def _list_item(self, data): self.instructions.append(('list_item', str(data)))", "subprocess.run( f\"curl '{url}'\", # Quotes required around url for URL", "def make_instructions(self): \"\"\" Take self.data and return a list of", "a list of instructions about its html visualization that is", "_iterate(self, data: iter): if isinstance(data, dict): for key, value in", "shell=True ).stdout self.data = json.loads(data) return self def make_instructions(self): \"\"\"", "at <url> in self.data. Returns self. 
\"\"\" data = subprocess.run(", "and return a list of instructions about its html visualization", "def _iterate(self, data: iter): if isinstance(data, dict): for key, value", "parameters stdout=subprocess.PIPE, shell=True ).stdout self.data = json.loads(data) return self def", "\"\"\" import json import subprocess class JsonVis: def _open_list(self): self.instructions.append(('open_list',", "return self def make_instructions(self): \"\"\" Take self.data and return a", "its html visualization that is parsed by json.html. \"\"\" self.instructions", "json import subprocess class JsonVis: def _open_list(self): self.instructions.append(('open_list', None)) def", "if isinstance(data, dict): for key, value in data.items(): self._iterate(key) self._open_list()", "return a list of instructions about its html visualization that", "python dictionary generated from json data at <url> in self.data.", "def _list_item(self, data): self.instructions.append(('list_item', str(data))) def _horiz_rule(self): self.instructions.append(('horiz_rule', None)) def", "in data.items(): self._iterate(key) self._open_list() self._iterate(value) self._close_list() elif isinstance(data, list): self._open_list()", "that is parsed by json.html. \"\"\" self.instructions = [] self._open_list()", "import subprocess class JsonVis: def _open_list(self): self.instructions.append(('open_list', None)) def _list_item(self,", "a json dataset \"\"\" import json import subprocess class JsonVis:", "file visualization of a json dataset \"\"\" import json import", "list): self._open_list() for item in data: self._iterate(item) self._horiz_rule() self._close_list() else:", "make_instructions(self): \"\"\" Take self.data and return a list of instructions", "subprocess class JsonVis: def _open_list(self): self.instructions.append(('open_list', None)) def _list_item(self, data):", "stdout=subprocess.PIPE, shell=True ).stdout self.data = json.loads(data) return self def make_instructions(self):", "dataset \"\"\" import json import subprocess class JsonVis: def _open_list(self):", "import json import subprocess class JsonVis: def _open_list(self): self.instructions.append(('open_list', None))", "iter): if isinstance(data, dict): for key, value in data.items(): self._iterate(key)", "self._iterate(key) self._open_list() self._iterate(value) self._close_list() elif isinstance(data, list): self._open_list() for item", "json dataset \"\"\" import json import subprocess class JsonVis: def", "from json data at <url> in self.data. Returns self. \"\"\"", "json.html. \"\"\" self.instructions = [] self._open_list() self._iterate(self.data) self._close_list() return self.instructions", "self.data and return a list of instructions about its html", "in self.data. Returns self. 
\"\"\" data = subprocess.run( f\"curl '{url}'\",", "Quotes required around url for URL parameters stdout=subprocess.PIPE, shell=True ).stdout", "_close_list(self): self.instructions.append(('close_list', None)) def _iterate(self, data: iter): if isinstance(data, dict):", "data: self._iterate(item) self._horiz_rule() self._close_list() else: self._list_item(data) def download(self, url: str):", "Take self.data and return a list of instructions about its", "\"\"\" data = subprocess.run( f\"curl '{url}'\", # Quotes required around", "data.items(): self._iterate(key) self._open_list() self._iterate(value) self._close_list() elif isinstance(data, list): self._open_list() for", "for key, value in data.items(): self._iterate(key) self._open_list() self._iterate(value) self._close_list() elif", "self._open_list() for item in data: self._iterate(item) self._horiz_rule() self._close_list() else: self._list_item(data)", "\"\"\"\\ Provides html file visualization of a json dataset \"\"\"", "self.instructions.append(('open_list', None)) def _list_item(self, data): self.instructions.append(('list_item', str(data))) def _horiz_rule(self): self.instructions.append(('horiz_rule',", "for URL parameters stdout=subprocess.PIPE, shell=True ).stdout self.data = json.loads(data) return", "self.data. Returns self. \"\"\" data = subprocess.run( f\"curl '{url}'\", #", "self._open_list() self._iterate(value) self._close_list() elif isinstance(data, list): self._open_list() for item in", "f\"curl '{url}'\", # Quotes required around url for URL parameters", "value in data.items(): self._iterate(key) self._open_list() self._iterate(value) self._close_list() elif isinstance(data, list):", "about its html visualization that is parsed by json.html. \"\"\"", "self._iterate(value) self._close_list() elif isinstance(data, list): self._open_list() for item in data:", "\"\"\" Take self.data and return a list of instructions about", "def _close_list(self): self.instructions.append(('close_list', None)) def _iterate(self, data: iter): if isinstance(data,", "data): self.instructions.append(('list_item', str(data))) def _horiz_rule(self): self.instructions.append(('horiz_rule', None)) def _close_list(self): self.instructions.append(('close_list',", "generated from json data at <url> in self.data. Returns self.", "of a json dataset \"\"\" import json import subprocess class", "str): \"\"\" Store a python dictionary generated from json data", "_horiz_rule(self): self.instructions.append(('horiz_rule', None)) def _close_list(self): self.instructions.append(('close_list', None)) def _iterate(self, data:", "is parsed by json.html. \"\"\" self.instructions = [] self._open_list() self._iterate(self.data)", "data at <url> in self.data. Returns self. 
\"\"\" data =", "self def make_instructions(self): \"\"\" Take self.data and return a list", "in data: self._iterate(item) self._horiz_rule() self._close_list() else: self._list_item(data) def download(self, url:", "a python dictionary generated from json data at <url> in", "isinstance(data, dict): for key, value in data.items(): self._iterate(key) self._open_list() self._iterate(value)", "url for URL parameters stdout=subprocess.PIPE, shell=True ).stdout self.data = json.loads(data)", "self.data = json.loads(data) return self def make_instructions(self): \"\"\" Take self.data", "def _open_list(self): self.instructions.append(('open_list', None)) def _list_item(self, data): self.instructions.append(('list_item', str(data))) def", "None)) def _close_list(self): self.instructions.append(('close_list', None)) def _iterate(self, data: iter): if", "self._list_item(data) def download(self, url: str): \"\"\" Store a python dictionary", "None)) def _iterate(self, data: iter): if isinstance(data, dict): for key,", "html visualization that is parsed by json.html. \"\"\" self.instructions =", "of instructions about its html visualization that is parsed by", "def download(self, url: str): \"\"\" Store a python dictionary generated", "around url for URL parameters stdout=subprocess.PIPE, shell=True ).stdout self.data =", "self._iterate(item) self._horiz_rule() self._close_list() else: self._list_item(data) def download(self, url: str): \"\"\"", "item in data: self._iterate(item) self._horiz_rule() self._close_list() else: self._list_item(data) def download(self,", "'{url}'\", # Quotes required around url for URL parameters stdout=subprocess.PIPE,", "isinstance(data, list): self._open_list() for item in data: self._iterate(item) self._horiz_rule() self._close_list()", "instructions about its html visualization that is parsed by json.html.", "self. \"\"\" data = subprocess.run( f\"curl '{url}'\", # Quotes required", "class JsonVis: def _open_list(self): self.instructions.append(('open_list', None)) def _list_item(self, data): self.instructions.append(('list_item',", "dict): for key, value in data.items(): self._iterate(key) self._open_list() self._iterate(value) self._close_list()", "data: iter): if isinstance(data, dict): for key, value in data.items():", "_open_list(self): self.instructions.append(('open_list', None)) def _list_item(self, data): self.instructions.append(('list_item', str(data))) def _horiz_rule(self):", "def _horiz_rule(self): self.instructions.append(('horiz_rule', None)) def _close_list(self): self.instructions.append(('close_list', None)) def _iterate(self,", "self._close_list() else: self._list_item(data) def download(self, url: str): \"\"\" Store a", "dictionary generated from json data at <url> in self.data. Returns", "json.loads(data) return self def make_instructions(self): \"\"\" Take self.data and return", "visualization of a json dataset \"\"\" import json import subprocess", "download(self, url: str): \"\"\" Store a python dictionary generated from", "parsed by json.html. \"\"\" self.instructions = [] self._open_list() self._iterate(self.data) self._close_list()", "by json.html. 
\"\"\" self.instructions = [] self._open_list() self._iterate(self.data) self._close_list() return", "self.instructions.append(('close_list', None)) def _iterate(self, data: iter): if isinstance(data, dict): for", "_list_item(self, data): self.instructions.append(('list_item', str(data))) def _horiz_rule(self): self.instructions.append(('horiz_rule', None)) def _close_list(self):", "self._close_list() elif isinstance(data, list): self._open_list() for item in data: self._iterate(item)", "Provides html file visualization of a json dataset \"\"\" import" ]
[ "self.setWindowIcon(QIcon(\"sim2d_game_analyzer/figures/icon.png\")) vbox = QVBoxLayout() tabWidget = QTabWidget() tabWidget.setFont(QtGui.QFont(\"Sanserif\", 12)) self.fmdb_tab", "import QIcon from PyQt5.QtWidgets import (QApplication, QDialog, QGroupBox, QMainWindow, QTabWidget,", "500 left = 100 width = 70*4 height = 130*4", "tabWidget.setFont(QtGui.QFont(\"Sanserif\", 12)) self.fmdb_tab = FMDBTab() tabWidget.addTab(self.fmdb_tab, FMDBTab.NAME) vbox.addWidget(tabWidget) wid =", "130*4 def __init__(self): QMainWindow.__init__(self) self.setGeometry(self.screen().geometry()) self.setWindowTitle(self.title) self.setWindowIcon(QIcon(\"sim2d_game_analyzer/figures/icon.png\")) vbox = QVBoxLayout()", "Game Analyzer\" top = 500 left = 100 width =", "top = 500 left = 100 width = 70*4 height", "= 70*4 height = 130*4 def __init__(self): QMainWindow.__init__(self) self.setGeometry(self.screen().geometry()) self.setWindowTitle(self.title)", "import QEvent, QPoint, Qt from PyQt5.QtGui import QIcon from PyQt5.QtWidgets", "self.setWindowTitle(self.title) self.setWindowIcon(QIcon(\"sim2d_game_analyzer/figures/icon.png\")) vbox = QVBoxLayout() tabWidget = QTabWidget() tabWidget.setFont(QtGui.QFont(\"Sanserif\", 12))", "= QWidget(self) self.setCentralWidget(wid) wid.setLayout(vbox) if __name__ == \"__main__\": app =", "MainWindow(QMainWindow): title = \"Sim2d Game Analyzer\" top = 500 left", "QIcon from PyQt5.QtWidgets import (QApplication, QDialog, QGroupBox, QMainWindow, QTabWidget, QVBoxLayout,", "= 500 left = 100 width = 70*4 height =", "QWidget(self) self.setCentralWidget(wid) wid.setLayout(vbox) if __name__ == \"__main__\": app = QApplication(sys.argv)", "70*4 height = 130*4 def __init__(self): QMainWindow.__init__(self) self.setGeometry(self.screen().geometry()) self.setWindowTitle(self.title) self.setWindowIcon(QIcon(\"sim2d_game_analyzer/figures/icon.png\"))", "FMDBTab class MainWindow(QMainWindow): title = \"Sim2d Game Analyzer\" top =", "QVBoxLayout() tabWidget = QTabWidget() tabWidget.setFont(QtGui.QFont(\"Sanserif\", 12)) self.fmdb_tab = FMDBTab() tabWidget.addTab(self.fmdb_tab,", "class MainWindow(QMainWindow): title = \"Sim2d Game Analyzer\" top = 500", "import QtGui from PyQt5.QtCore import QEvent, QPoint, Qt from PyQt5.QtGui", "QWidget) from sim2d_game_analyzer.fmdb_tab import FMDBTab class MainWindow(QMainWindow): title = \"Sim2d", "100 width = 70*4 height = 130*4 def __init__(self): QMainWindow.__init__(self)", "= FMDBTab() tabWidget.addTab(self.fmdb_tab, FMDBTab.NAME) vbox.addWidget(tabWidget) wid = QWidget(self) self.setCentralWidget(wid) wid.setLayout(vbox)", "QGroupBox, QMainWindow, QTabWidget, QVBoxLayout, QWidget) from sim2d_game_analyzer.fmdb_tab import FMDBTab class", "12)) self.fmdb_tab = FMDBTab() tabWidget.addTab(self.fmdb_tab, FMDBTab.NAME) vbox.addWidget(tabWidget) wid = QWidget(self)", "QDialog, QGroupBox, QMainWindow, QTabWidget, QVBoxLayout, QWidget) from sim2d_game_analyzer.fmdb_tab import FMDBTab", "QVBoxLayout, QWidget) from sim2d_game_analyzer.fmdb_tab import FMDBTab class MainWindow(QMainWindow): title =", "left = 100 width = 70*4 height = 130*4 def", "= QVBoxLayout() tabWidget = QTabWidget() tabWidget.setFont(QtGui.QFont(\"Sanserif\", 12)) self.fmdb_tab = FMDBTab()", "FMDBTab.NAME) vbox.addWidget(tabWidget) wid = QWidget(self) self.setCentralWidget(wid) wid.setLayout(vbox) if __name__ ==", "= 130*4 def __init__(self): QMainWindow.__init__(self) self.setGeometry(self.screen().geometry()) self.setWindowTitle(self.title) 
self.setWindowIcon(QIcon(\"sim2d_game_analyzer/figures/icon.png\")) vbox =", "QTabWidget() tabWidget.setFont(QtGui.QFont(\"Sanserif\", 12)) self.fmdb_tab = FMDBTab() tabWidget.addTab(self.fmdb_tab, FMDBTab.NAME) vbox.addWidget(tabWidget) wid", "__name__ == \"__main__\": app = QApplication(sys.argv) mainwindow = MainWindow() sys.exit(app.exec())", "height = 130*4 def __init__(self): QMainWindow.__init__(self) self.setGeometry(self.screen().geometry()) self.setWindowTitle(self.title) self.setWindowIcon(QIcon(\"sim2d_game_analyzer/figures/icon.png\")) vbox", "vbox.addWidget(tabWidget) wid = QWidget(self) self.setCentralWidget(wid) wid.setLayout(vbox) if __name__ == \"__main__\":", "width = 70*4 height = 130*4 def __init__(self): QMainWindow.__init__(self) self.setGeometry(self.screen().geometry())", "sys from PyQt5 import QtGui from PyQt5.QtCore import QEvent, QPoint,", "PyQt5.QtGui import QIcon from PyQt5.QtWidgets import (QApplication, QDialog, QGroupBox, QMainWindow,", "self.fmdb_tab = FMDBTab() tabWidget.addTab(self.fmdb_tab, FMDBTab.NAME) vbox.addWidget(tabWidget) wid = QWidget(self) self.setCentralWidget(wid)", "PyQt5.QtCore import QEvent, QPoint, Qt from PyQt5.QtGui import QIcon from", "import (QApplication, QDialog, QGroupBox, QMainWindow, QTabWidget, QVBoxLayout, QWidget) from sim2d_game_analyzer.fmdb_tab", "\"Sim2d Game Analyzer\" top = 500 left = 100 width", "= QTabWidget() tabWidget.setFont(QtGui.QFont(\"Sanserif\", 12)) self.fmdb_tab = FMDBTab() tabWidget.addTab(self.fmdb_tab, FMDBTab.NAME) vbox.addWidget(tabWidget)", "PyQt5.QtWidgets import (QApplication, QDialog, QGroupBox, QMainWindow, QTabWidget, QVBoxLayout, QWidget) from", "import sys from PyQt5 import QtGui from PyQt5.QtCore import QEvent,", "= 100 width = 70*4 height = 130*4 def __init__(self):", "wid = QWidget(self) self.setCentralWidget(wid) wid.setLayout(vbox) if __name__ == \"__main__\": app", "def __init__(self): QMainWindow.__init__(self) self.setGeometry(self.screen().geometry()) self.setWindowTitle(self.title) self.setWindowIcon(QIcon(\"sim2d_game_analyzer/figures/icon.png\")) vbox = QVBoxLayout() tabWidget", "self.setCentralWidget(wid) wid.setLayout(vbox) if __name__ == \"__main__\": app = QApplication(sys.argv) mainwindow", "vbox = QVBoxLayout() tabWidget = QTabWidget() tabWidget.setFont(QtGui.QFont(\"Sanserif\", 12)) self.fmdb_tab =", "QPoint, Qt from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import (QApplication,", "from PyQt5.QtCore import QEvent, QPoint, Qt from PyQt5.QtGui import QIcon", "(QApplication, QDialog, QGroupBox, QMainWindow, QTabWidget, QVBoxLayout, QWidget) from sim2d_game_analyzer.fmdb_tab import", "FMDBTab() tabWidget.addTab(self.fmdb_tab, FMDBTab.NAME) vbox.addWidget(tabWidget) wid = QWidget(self) self.setCentralWidget(wid) wid.setLayout(vbox) if", "tabWidget.addTab(self.fmdb_tab, FMDBTab.NAME) vbox.addWidget(tabWidget) wid = QWidget(self) self.setCentralWidget(wid) wid.setLayout(vbox) if __name__", "wid.setLayout(vbox) if __name__ == \"__main__\": app = QApplication(sys.argv) mainwindow =", "if __name__ == \"__main__\": app = QApplication(sys.argv) mainwindow = MainWindow()", "Analyzer\" top = 500 left = 100 width = 70*4", "__init__(self): QMainWindow.__init__(self) self.setGeometry(self.screen().geometry()) self.setWindowTitle(self.title) self.setWindowIcon(QIcon(\"sim2d_game_analyzer/figures/icon.png\")) vbox = QVBoxLayout() tabWidget =", "sim2d_game_analyzer.fmdb_tab import FMDBTab class MainWindow(QMainWindow): title = \"Sim2d Game Analyzer\"", "title = \"Sim2d Game Analyzer\" top = 
500 left =", "Qt from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import (QApplication, QDialog,", "tabWidget = QTabWidget() tabWidget.setFont(QtGui.QFont(\"Sanserif\", 12)) self.fmdb_tab = FMDBTab() tabWidget.addTab(self.fmdb_tab, FMDBTab.NAME)", "QMainWindow.__init__(self) self.setGeometry(self.screen().geometry()) self.setWindowTitle(self.title) self.setWindowIcon(QIcon(\"sim2d_game_analyzer/figures/icon.png\")) vbox = QVBoxLayout() tabWidget = QTabWidget()", "QEvent, QPoint, Qt from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import", "self.setGeometry(self.screen().geometry()) self.setWindowTitle(self.title) self.setWindowIcon(QIcon(\"sim2d_game_analyzer/figures/icon.png\")) vbox = QVBoxLayout() tabWidget = QTabWidget() tabWidget.setFont(QtGui.QFont(\"Sanserif\",", "QtGui from PyQt5.QtCore import QEvent, QPoint, Qt from PyQt5.QtGui import", "from PyQt5.QtWidgets import (QApplication, QDialog, QGroupBox, QMainWindow, QTabWidget, QVBoxLayout, QWidget)", "from sim2d_game_analyzer.fmdb_tab import FMDBTab class MainWindow(QMainWindow): title = \"Sim2d Game", "= \"Sim2d Game Analyzer\" top = 500 left = 100", "QMainWindow, QTabWidget, QVBoxLayout, QWidget) from sim2d_game_analyzer.fmdb_tab import FMDBTab class MainWindow(QMainWindow):", "from PyQt5 import QtGui from PyQt5.QtCore import QEvent, QPoint, Qt", "PyQt5 import QtGui from PyQt5.QtCore import QEvent, QPoint, Qt from", "from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import (QApplication, QDialog, QGroupBox,", "QTabWidget, QVBoxLayout, QWidget) from sim2d_game_analyzer.fmdb_tab import FMDBTab class MainWindow(QMainWindow): title", "import FMDBTab class MainWindow(QMainWindow): title = \"Sim2d Game Analyzer\" top" ]
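Extending the window follows the same pattern as the FMDBTab registration above. The sketch below adds a second, purely hypothetical tab (StatsTab is not part of the source) just to show the shape of the API: a QWidget subclass exposing a NAME constant, registered with one addTab call.

from PyQt5.QtWidgets import QLabel, QVBoxLayout, QWidget

class StatsTab(QWidget):
    NAME = "Stats"  # tab label, mirroring FMDBTab.NAME

    def __init__(self):
        super().__init__()
        layout = QVBoxLayout(self)
        layout.addWidget(QLabel("Match statistics go here"))

# inside MainWindow.__init__, next to the FMDBTab registration:
#     self.stats_tab = StatsTab()
#     tabWidget.addTab(self.stats_tab, StatsTab.NAME)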
[ "yield l[i:i + chunk_size] def prepare(raw_organisations, raw_schools, raw_locations, raw_teachers, raw_students):", "class Sheet: \"Data container object to hold the contents of", "= [x for x in sheet.iter_rows()] if rows: titles =", "for x in schools} yield [make_relation(\"ClpLocation\", location[\"id\"], \"schools\", \"ClpSchool\", school_keys[schoolId],", "schools, locations) = process_sheet( sheet, SCHOOL_TITLES, [ORGANISATION_FIELDS, SCHOOL_FIELDS, LOCATION_FIELDS]) elif", "= {} for x in teachers: # get an existing", "copies: for to_remove in keys_to_remove: d.pop(to_remove, None) return copies def", "teachers for schoolId in teacher.get(\"schools\", [])] # Build student ->", "= relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + \"-relations\" + str(i), \"relations\")) os.makedirs(nodes_dir, exist_ok=True) path", "\"teachers\") for teacher in teachers for schoolId in teacher.get(\"schools\", [])]", "\"SCHOOL_P_STATE\", \"SCHOOL_P_POSTCODE\", \"SCHOOL_S_ADDRESS1\", \"SCHOOL_S_SUBURB\", \"SCHOOL_S_STATE\", \"SCHOOL_S_POSTCODE\", \"LOCATION_NAME\", \"LOC_ELECTORATE\", \"LOC_S_ADDRESS1\", \"LOC_S_SUBURB\",", "return os.path.join(path_to_py, relative_path) def extract(fields, row_as_dict): data = {} for", "inject_required(\"ClpTeacher\", uniques.values()) return injected def extract_from_xlsx(file_path): for sheet in convert_xlsx(file_path):", "for defn in field_defns] return structs def unique(key, dicts): t", "\"ORG_ELECTORATE\": \"ELECTORATE\", \"S_ADDRESS1\": \"ADDRESS\", \"S_SUBURB\": \"SUBURB\", \"S_STATE\": \"STATE\", \"S_POSTCODE\": \"POSTCODE\",", "that for being related to a different school. # We", "of Sheet objects, in which row has been converted into", "to_remove in keys_to_remove: d.pop(to_remove, None) return copies def write_nodes(*list_of_lists): for", "x[\"_typeName\"] = type_name x[\"id\"] = cuid.cuid() x[\"createdAt\"] = x[\"updatedAt\"] =", "\"CLP_SCHOOL_ID\", \"ORGANISATION_ID\": \"CLP_ORGANISATION_ID\", \"SCHOOL_S_ADDRESS1\": \"ADDRESS\", \"SCHOOL_S_SUBURB\": \"SUBURB\", \"SCHOOL_S_STATE\": \"STATE\", \"SCHOOL_S_POSTCODE\":", "f: nodes = { \"valueType\": \"relations\", \"values\": list(one_list) } f.write(json.dumps(nodes))", "\"relations\", \"values\": list(one_list) } f.write(json.dumps(nodes)) def chunks(n, l): \"\"\"Yield n", "x[\"dateOfBirth\"] = convert_dob_to_datetime(x[\"dateOfBirth\"]) return injected def prepare_teachers(teachers): # Like locations,", "for teacher in teachers for schoolId in teacher.get(\"schools\", [])] #", "\"S_STATE\": \"STATE\", \"S_POSTCODE\": \"POSTCODE\", } SCHOOL_FIELDS = {\"SCHOOL_NAME\": \"NAME\", \"SCH_ELECTORATE\":", "def prepare_locations(locations): # There are multiple locations, each of which", "for schoolId in teacher.get(\"schools\", [])] # Build student -> school", "datetime from openpyxl import load_workbook import cuid # https://github.com/necaris/cuid.py -", "- create uuid's in the format that graphcool expects SOURCE_XLSX", "STUDENT_TITLES = [\"SCHOOL_NAME\", \"SCHOOL_ID\", \"STUDENT_ID\", \"STUDENT_SRN\", \"LOCATION_NAME\", \"STUDENT_LNAME\", \"STUDENT_FNAME\", \"DOB\",", "\"DEGREE_YEAR\", \"ORGANISATION_ID\", \"SCHOOL_ID\"] STUDENT_TITLES = [\"SCHOOL_NAME\", \"SCHOOL_ID\", \"STUDENT_ID\", \"STUDENT_SRN\", \"LOCATION_NAME\",", "str(i), \"nodes\")) os.makedirs(nodes_dir, exist_ok=True) path = os.path.join(nodes_dir, \"1.json\") with open(path,", "org_keys = {x[\"clpOrganisationId\"]: x[\"id\"] for x in organisations} yield [make_relation(\"ClpOrganisation\",", "*keys_to_remove): \"Return iterable that 
contains copies of the given dictionary", "{\"_typeName\": entity1, \"id\": id1, \"fieldName\": field1}, {\"_typeName\": entity2, \"id\": id2,", "\"S_POSTCODE\": \"POSTCODE\", } SCHOOL_FIELDS = {\"SCHOOL_NAME\": \"NAME\", \"SCH_ELECTORATE\": \"ELECTORATE\", \"SCHOOL_ID\":", "(i, one_list) in enumerate(list_of_lists): nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + str(i), \"nodes\"))", "= [\"ORGANISATION_ID\", \"ORGANISATION_NAME\", \"ORG_ELECTORATE\", \"P_ADDRESS1\", \"P_SUBURB\", \"P_STATE\", \"P_POSTCODE\", \"S_ADDRESS1\", \"S_SUBURB\",", "enumerate(list_of_lists): nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + str(i), \"nodes\")) os.makedirs(nodes_dir, exist_ok=True) path", "\"TEACHER_LANGUAGES\", \"P_ADDRESS1\", \"P_ADDRESS2\", \"P_SUBURB\", \"P_STATE\", \"P_POSTCODE\", \"TELEPHONE\", \"TEL_EVENING\", \"EMAIL\", \"MOBILE\",", "return \"\".join(bits) def relative_to_absolute(relative_path): path_to_py = os.path.abspath(os.path.dirname(__file__)) return os.path.join(path_to_py, relative_path)", "uniques = unique(\"clpSchoolId\", schools) injected = inject_required(\"ClpSchool\", uniques) return injected", "collect all the schools that the same teacher is teaching", "[convert_row_to_dict(titles, row) for row in rows[1:]] yield Sheet(sheet.title, titles, dicts)", "d.pop(to_remove, None) return copies def write_nodes(*list_of_lists): for (i, one_list) in", "\"schools\"), copy_without(teachers, \"organisationId\", \"organisationName\", \"schools\", \"schoolName\"), *chunks(3, copy_without(students, \"schoolId\", \"schoolName\",", "entity2, \"id\": id2, \"fieldName\": field2} ] def generate_relations(organisations, schools, locations,", "similar-sized chunks from l.\"\"\" chunk_size = 1 + len(l) //", "camel case. 
'PARENT_ORGANISATION_ID' => 'parentOrganisationId'\"\"\" bits = [(x.lower() if i", "else x.title()) for (i, x) in enumerate(s.split(\"_\"))] return \"\".join(bits) def", "the new location location = uniques.setdefault(x[\"name\"], x) related_schools = location.setdefault(\"schools\",", "string from 99/MON/YY to a ISO date\" dt = datetime.datetime.strptime(s,", "] def generate_relations(organisations, schools, locations, teachers, students): # Build school", "schoolId in teacher.get(\"schools\", [])] # Build student -> school relations", "pip install cuid import os.path import json import datetime from", "\"STATE\", \"P_POSTCODE\": \"POSTCODE\", \"TELEPHONE\": \"DAY_PHONE\", \"TEL_EVENING\": \"EVENING_PHONE\", \"EMAIL\": \"EMAIL\", \"MOBILE\":", "\"SCHOOL_ID\": \"CLP_SCHOOL_ID\", \"ORGANISATION_ID\": \"CLP_ORGANISATION_ID\", \"SCHOOL_S_ADDRESS1\": \"ADDRESS\", \"SCHOOL_S_SUBURB\": \"SUBURB\", \"SCHOOL_S_STATE\": \"STATE\",", "return fat_orgs def prepare_schools(schools): uniques = unique(\"clpSchoolId\", schools) injected =", "write_nodes(*list_of_lists): for (i, one_list) in enumerate(list_of_lists): nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR +", "yield [make_relation(\"ClpLocation\", location[\"id\"], \"schools\", \"ClpSchool\", school_keys[schoolId], \"locations\") for location in", "dicts: x[\"_typeName\"] = type_name x[\"id\"] = cuid.cuid() x[\"createdAt\"] = x[\"updatedAt\"]", "\"STUDENT_SRN\": \"SRN\", \"LOCATION_NAME\": \"LOCATION\", \"STUDENT_LNAME\": \"FAMILY_NAME\", \"STUDENT_FNAME\": \"GIVEN_NAMES\", \"DOB\": \"DATE_OF_BIRTH\",", "\"P_ADDRESS2\": \"ADDRESS2\", \"P_SUBURB\": \"SUBURB\", \"P_STATE\": \"STATE\", \"P_POSTCODE\": \"POSTCODE\", \"TELEPHONE\": \"DAY_PHONE\",", "in teachers: # get an existing teacher with that id,", "relations school_keys = {x[\"clpSchoolId\"]: x[\"id\"] for x in schools} yield", "\"S_ADDRESS1\", \"S_SUBURB\", \"S_STATE\", \"S_POSTCODE\", \"SCHOOL_NAME\", \"SCH_ELECTORATE\", \"SCHOOL_ID\", \"SCHOOL_P_ADDRESS1\", \"SCHOOL_P_SUBURB\", \"SCHOOL_P_STATE\",", "= teacher.setdefault(\"schools\", list()) related_schools.append(x.pop(\"schoolId\")) injected = inject_required(\"ClpTeacher\", uniques.values()) return injected", "within an excel spreadsheet\" def __init__(self, name, titles=None, rows=None): self.name", "in school_keys] def main(): xlsx_path = relative_to_absolute(SOURCE_XLSX) raw_collections = extract_from_xlsx(xlsx_path)", "sheet.name == \"Teacher\": (teachers, ) = process_sheet(sheet, TEACHER_TITLES, [TEACHER_FIELDS]) elif", "\"ORG_ELECTORATE\", \"P_ADDRESS1\", \"P_SUBURB\", \"P_STATE\", \"P_POSTCODE\", \"S_ADDRESS1\", \"S_SUBURB\", \"S_STATE\", \"S_POSTCODE\", \"SCHOOL_NAME\",", "[x.copy() for x in dicts] for d in copies: for", "field_defns] return structs def unique(key, dicts): t = {x[key]: x", "with open(path, \"w\") as f: nodes = { \"valueType\": \"relations\",", "relative_path) def extract(fields, row_as_dict): data = {} for (k, v)", "= {x[\"clpOrganisationId\"]: x[\"id\"] for x in organisations} yield [make_relation(\"ClpOrganisation\", org_keys[x[\"clpOrganisationId\"]],", "\"school\", \"ClpSchool\", school_keys[student[\"schoolId\"]], \"students\") for student in students if student[\"schoolId\"]", "\"DAY_PHONE\", \"TEL_EVENING\": \"EVENING_PHONE\", \"EMAIL\": \"EMAIL\", \"MOBILE\": \"MOBILE\", \"LEVEL_TAUGHT\": \"LEVEL_TAUGHT\", \"LEVEL_OF_EDUCATION\":", "has been converted into a dictionary\"\"\" work_book = load_workbook(filename=xlsx_file, read_only=True,", "for x in injected: x[\"dateOfBirth\"] = 
convert_dob_to_datetime(x[\"dateOfBirth\"]) return injected def", "x in locations: # get an existing location with the", "related_schools.append(x.pop(\"clpSchoolId\")) injected = inject_required(\"ClpLocation\", uniques.values()) # FIX THIS - Current", "school_keys[student[\"schoolId\"]], \"students\") for student in students if student[\"schoolId\"] in school_keys]", "that meet at the same location. uniques = {} for", "relative_to_absolute(SOURCE_XLSX) raw_collections = extract_from_xlsx(xlsx_path) (organisations, schools, locations, teachers, students) =", "\"DATE_OF_BIRTH\", \"TEL\": \"PHONE\", \"LOCATION_NAME_1\": \"DAY_SCHOOL\", } class Sheet: \"Data container", "\"ORGANISATION_NAME\", \"SCHOOL_NAME\": \"SCHOOL_NAME\", \"TITLE\": \"TITLE\", \"LNAME\": \"FAMILY_NAME\", \"FNAME\": \"GIVEN_NAMES\", \"TEACHER_LANGUAGES\":", "x in organisations} yield [make_relation(\"ClpOrganisation\", org_keys[x[\"clpOrganisationId\"]], \"schools\", \"ClpSchool\", x[\"id\"], \"organisation\")", "multiple locations, each of which is identitical except that for", "os.path.join(nodes_dir, \"1.json\") with open(path, \"w\") as f: nodes = {", "return injected def prepare_teachers(teachers): # Like locations, the same teacher", "t.values() def now_as_iso8601(): return datetime.datetime.now().replace(microsecond=0).isoformat() + \"Z\" def inject_required(type_name, dicts):", "{x[\"clpOrganisationId\"]: x[\"id\"] for x in organisations} yield [make_relation(\"ClpOrganisation\", org_keys[x[\"clpOrganisationId\"]], \"schools\",", "[cell.value for cell in rows[0]] dicts = [convert_row_to_dict(titles, row) for", "Sheet objects, in which row has been converted into a", "uniques = unique(\"clpStudentId\", students) injected = inject_required(\"ClpStudent\", uniques) for x", "id :( Make one up for the time being for", "id2, field2): return [ {\"_typeName\": entity1, \"id\": id1, \"fieldName\": field1},", "data[to_camel(v)] = row_as_dict[k] return data def process_sheet(sheet, titles, field_defns): if", "copies def write_nodes(*list_of_lists): for (i, one_list) in enumerate(list_of_lists): nodes_dir =", "to a different school. 
# We have to collect all", "\"LOCATION_NAME\", \"LOC_ELECTORATE\", \"LOC_S_ADDRESS1\", \"LOC_S_SUBURB\", \"LOC_S_STATE\", \"LOC_S_POSTCODE\"] ORGANISATION_FIELDS = {\"ORGANISATION_ID\": \"CLP_ORGANISATION_ID\",", "= location.setdefault(\"schools\", list()) related_schools.append(x.pop(\"clpSchoolId\")) injected = inject_required(\"ClpLocation\", uniques.values()) # FIX", "\".0\" def prepare_students(students): uniques = unique(\"clpStudentId\", students) injected = inject_required(\"ClpStudent\",", "= relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + str(i), \"nodes\")) os.makedirs(nodes_dir, exist_ok=True) path = os.path.join(nodes_dir,", "row in rows[1:]] yield Sheet(sheet.title, titles, dicts) else: yield Sheet(sheet.title)", "\"EMAIL\", \"MOBILE\", \"LEVEL_TAUGHT\", \"LEVEL_OF_EDUCATION\", \"FIELD_OF_EDUCATION\", \"DEGREE_COUNTRY\", \"DEGREE_YEAR\", \"ORGANISATION_ID\", \"SCHOOL_ID\"] STUDENT_TITLES", "dt = datetime.datetime.strptime(s, \"%d/%b/%y\") return dt.isoformat() + \".0Z\" # GraphCool", "= process_sheet( sheet, SCHOOL_TITLES, [ORGANISATION_FIELDS, SCHOOL_FIELDS, LOCATION_FIELDS]) elif sheet.name ==", "There are multiple locations, each of which is identitical except", "to a ISO date\" dt = datetime.datetime.strptime(s, \"%d/%b/%y\") return dt.isoformat()", "= convert_dob_to_datetime(x[\"dateOfBirth\"]) return injected def prepare_teachers(teachers): # Like locations, the", "def prepare(raw_organisations, raw_schools, raw_locations, raw_teachers, raw_students): return ( prepare_organisations(raw_organisations), prepare_schools(raw_schools),", "} LOCATION_FIELDS = {\"LOCATION_NAME\": \"NAME\", \"LOC_ELECTORATE\": \"ELECTORATE\", \"SCHOOL_ID\": \"CLP_SCHOOL_ID\", \"LOC_S_ADDRESS1\":", "\"ORGANISATION_ID\", \"SCHOOL_ID\": \"SCHOOL_ID\", } STUDENT_FIELDS = {\"SCHOOL_NAME\": \"SCHOOL_NAME\", \"SCHOOL_ID\": \"SCHOOL_ID\",", "id2, \"fieldName\": field2} ] def generate_relations(organisations, schools, locations, teachers, students):", "x.title()) for (i, x) in enumerate(s.split(\"_\"))] return \"\".join(bits) def relative_to_absolute(relative_path):", "in injected: x[\"dateOfBirth\"] = convert_dob_to_datetime(x[\"dateOfBirth\"]) return injected def prepare_teachers(teachers): #", "not None: data[titles[i]] = str(cell.value) return data def convert_xlsx(xlsx_file): \"\"\"Convert", "entity2, id2, field2): return [ {\"_typeName\": entity1, \"id\": id1, \"fieldName\":", "work_book: rows = [x for x in sheet.iter_rows()] if rows:", "def prepare_teachers(teachers): # Like locations, the same teacher can have", "teacher can have multiple records, # each of which is", "rows=None): self.name = name self.titles = titles or [] self.rows", "} class Sheet: \"Data container object to hold the contents", "location id :( Make one up for the time being", "\"nodes\")) os.makedirs(nodes_dir, exist_ok=True) path = os.path.join(nodes_dir, \"1.json\") with open(path, \"w\")", "fat_orgs def prepare_schools(schools): uniques = unique(\"clpSchoolId\", schools) injected = inject_required(\"ClpSchool\",", "the required fields that graphcool import required\" for x in", "prepare_locations(raw_locations), prepare_teachers(raw_teachers), prepare_students(raw_students) ) def make_relation(entity1, id1, field1, entity2, id2,", "in the format that graphcool expects SOURCE_XLSX = \"./data/CLP_combined.xlsx\" EXTRACT_OUTPUT_DIR", "injected = inject_required(\"ClpTeacher\", uniques.values()) return injected def extract_from_xlsx(file_path): for sheet", "in work_book: rows = [x for x in sheet.iter_rows()] if", "x) 
for (i, x) in enumerate(titles) if x != sheet.titles[i]])", "*chunks(3, copy_without(students, \"schoolId\", \"schoolName\", \"location\"))) write_relations(generate_relations(organisations, schools, locations, teachers, students))", "the same teacher is teaching at. uniques = {} for", "x[\"id\"] for x in schools} yield [make_relation(\"ClpLocation\", location[\"id\"], \"schools\", \"ClpSchool\",", "\"LOC_S_SUBURB\", \"LOC_S_STATE\", \"LOC_S_POSTCODE\"] ORGANISATION_FIELDS = {\"ORGANISATION_ID\": \"CLP_ORGANISATION_ID\", \"ORGANISATION_NAME\": \"NAME\", \"ORG_ELECTORATE\":", "now_as_iso8601() return list(dicts) def prepare_organisations(organisations): unique_orgs = unique(\"clpOrganisationId\", organisations) fat_orgs", "= \"../server/extract\" SCHOOL_TITLES = [\"ORGANISATION_ID\", \"ORGANISATION_NAME\", \"ORG_ELECTORATE\", \"P_ADDRESS1\", \"P_SUBURB\", \"P_STATE\",", "\"DAY_SCHOOL\", } class Sheet: \"Data container object to hold the", "in sheet.rows] for defn in field_defns] return structs def unique(key,", "process_sheet( sheet, SCHOOL_TITLES, [ORGANISATION_FIELDS, SCHOOL_FIELDS, LOCATION_FIELDS]) elif sheet.name == \"Teacher\":", "get an existing teacher with that id, or add the", "\"P_SUBURB\": \"SUBURB\", \"P_STATE\": \"STATE\", \"P_POSTCODE\": \"POSTCODE\", \"TELEPHONE\": \"DAY_PHONE\", \"TEL_EVENING\": \"EVENING_PHONE\",", "iterable that contains copies of the given dictionary with all", "copy_without(dicts, *keys_to_remove): \"Return iterable that contains copies of the given", "len(l) // n for i in range(0, len(l), chunk_size): yield", "os.makedirs(nodes_dir, exist_ok=True) path = os.path.join(nodes_dir, \"1.json\") with open(path, \"w\") as", "\"MOBILE\": \"MOBILE\", \"LEVEL_TAUGHT\": \"LEVEL_TAUGHT\", \"LEVEL_OF_EDUCATION\": \"EDUCATION_LEVEL\", \"FIELD_OF_EDUCATION\": \"EDUCATION_FIELD\", \"DEGREE_COUNTRY\": \"EDUCATION_COUNTRY\",", "\"LEVEL_OF_EDUCATION\": \"EDUCATION_LEVEL\", \"FIELD_OF_EDUCATION\": \"EDUCATION_FIELD\", \"DEGREE_COUNTRY\": \"EDUCATION_COUNTRY\", \"DEGREE_YEAR\": \"EDUCATION_YEAR\", \"ORGANISATION_ID\": \"ORGANISATION_ID\",", "prepare_organisations(organisations): unique_orgs = unique(\"clpOrganisationId\", organisations) fat_orgs = inject_required(\"ClpOrganisation\", unique_orgs) return", "sheet in convert_xlsx(file_path): if sheet.name == \"SCHOOL-ORG\": (organisations, schools, locations)", "dicts = [convert_row_to_dict(titles, row) for row in rows[1:]] yield Sheet(sheet.title,", "== \"Teacher\": (teachers, ) = process_sheet(sheet, TEACHER_TITLES, [TEACHER_FIELDS]) elif sheet.name", "inject_required(type_name, dicts): \"Inject the required fields that graphcool import required\"", "\"valueType\": \"relations\", \"values\": list(one_list) } f.write(json.dumps(nodes)) def chunks(n, l): \"\"\"Yield", "successive similar-sized chunks from l.\"\"\" chunk_size = 1 + len(l)", "teachers, students) def copy_without(dicts, *keys_to_remove): \"Return iterable that contains copies", "collect all the schools that meet at the same location.", "the new teacher record teacher = uniques.setdefault(x[\"clpTeacherId\"], x) related_schools =", "sheet.name) return (organisations, schools, locations, teachers, students) def copy_without(dicts, *keys_to_remove):", "os.path.abspath(os.path.dirname(__file__)) return os.path.join(path_to_py, relative_path) def extract(fields, row_as_dict): data = {}", "LOCATION_FIELDS]) elif sheet.name == \"Teacher\": (teachers, ) = process_sheet(sheet, TEACHER_TITLES,", "TEACHER_TITLES = [\"TEACHER_ID\", \"ORGANISATION_NAME\", \"SCHOOL_NAME\", 
\"TEACHER_NAME\", \"TITLE\", \"LNAME\", \"FNAME\", \"TEACHER_LANGUAGES\",", "return [ {\"_typeName\": entity1, \"id\": id1, \"fieldName\": field1}, {\"_typeName\": entity2,", "= {\"LOCATION_NAME\": \"NAME\", \"LOC_ELECTORATE\": \"ELECTORATE\", \"SCHOOL_ID\": \"CLP_SCHOOL_ID\", \"LOC_S_ADDRESS1\": \"ADDRESS\", \"LOC_S_SUBURB\":", "\"MOBILE\", \"LEVEL_TAUGHT\": \"LEVEL_TAUGHT\", \"LEVEL_OF_EDUCATION\": \"EDUCATION_LEVEL\", \"FIELD_OF_EDUCATION\": \"EDUCATION_FIELD\", \"DEGREE_COUNTRY\": \"EDUCATION_COUNTRY\", \"DEGREE_YEAR\":", "that graphcool import required\" for x in dicts: x[\"_typeName\"] =", "# FIX THIS - Current extract doesn't include the CLP", "# Like locations, the same teacher can have multiple records,", "# get an existing teacher with that id, or add", "\"S_SUBURB\": \"SUBURB\", \"S_STATE\": \"STATE\", \"S_POSTCODE\": \"POSTCODE\", } SCHOOL_FIELDS = {\"SCHOOL_NAME\":", "convert_xlsx(file_path): if sheet.name == \"SCHOOL-ORG\": (organisations, schools, locations) = process_sheet(", "can have multiple records, # each of which is identitical", "hold the contents of one sheet within an excel spreadsheet\"", "school_keys] def main(): xlsx_path = relative_to_absolute(SOURCE_XLSX) raw_collections = extract_from_xlsx(xlsx_path) (organisations,", "in enumerate(list_of_lists): nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + \"-relations\" + str(i), \"relations\"))", "copy_without(teachers, \"organisationId\", \"organisationName\", \"schools\", \"schoolName\"), *chunks(3, copy_without(students, \"schoolId\", \"schoolName\", \"location\")))", "been converted into a dictionary\"\"\" work_book = load_workbook(filename=xlsx_file, read_only=True, data_only=True)", "required fields that graphcool import required\" for x in dicts:", "[\"ORGANISATION_ID\", \"ORGANISATION_NAME\", \"ORG_ELECTORATE\", \"P_ADDRESS1\", \"P_SUBURB\", \"P_STATE\", \"P_POSTCODE\", \"S_ADDRESS1\", \"S_SUBURB\", \"S_STATE\",", "teachers, students) = prepare(*raw_collections) write_nodes( organisations, copy_without(schools, \"clpOrganisationId\"), copy_without(locations, \"schools\"),", "x[\"updatedAt\"] = now_as_iso8601() return list(dicts) def prepare_organisations(organisations): unique_orgs = unique(\"clpOrganisationId\",", "in locations for schoolId in location.get(\"schools\", [])] # Build teacher", "titles or [] self.rows = rows or [] def convert_row_to_dict(titles,", "\"\".join(bits) def relative_to_absolute(relative_path): path_to_py = os.path.abspath(os.path.dirname(__file__)) return os.path.join(path_to_py, relative_path) def", "an existing teacher with that id, or add the new", "given keys removed\" copies = [x.copy() for x in dicts]", "elif sheet.name == \"Teacher\": (teachers, ) = process_sheet(sheet, TEACHER_TITLES, [TEACHER_FIELDS])", "uuid's in the format that graphcool expects SOURCE_XLSX = \"./data/CLP_combined.xlsx\"", "given name, or add the new location location = uniques.setdefault(x[\"name\"],", "in enumerate(titles) if x != sheet.titles[i]]) return [] structs =", "= x[\"updatedAt\"] = now_as_iso8601() return list(dicts) def prepare_organisations(organisations): unique_orgs =", "Make one up for the time being for x in", "\"LOCATION_NAME\", \"STUDENT_LNAME\", \"STUDENT_FNAME\", \"DOB\", \"TEL\", \"LOCATION_NAME_1\"] TEACHER_FIELDS = {\"TEACHER_ID\": \"CLP_TEACHER_ID\",", "unique_orgs = unique(\"clpOrganisationId\", organisations) fat_orgs = inject_required(\"ClpOrganisation\", unique_orgs) return fat_orgs", "= rows or [] def convert_row_to_dict(titles, row): data = {}", "for (i, x) 
in enumerate(titles) if x != sheet.titles[i]]) return", "\"Return iterable that contains copies of the given dictionary with", "entity1, \"id\": id1, \"fieldName\": field1}, {\"_typeName\": entity2, \"id\": id2, \"fieldName\":", "Build location -> school relations school_keys = {x[\"clpSchoolId\"]: x[\"id\"] for", "= {\"ORGANISATION_ID\": \"CLP_ORGANISATION_ID\", \"ORGANISATION_NAME\": \"NAME\", \"ORG_ELECTORATE\": \"ELECTORATE\", \"S_ADDRESS1\": \"ADDRESS\", \"S_SUBURB\":", "\"schools\", \"ClpSchool\", school_keys[schoolId], \"locations\") for location in locations for schoolId", "up for the time being for x in injected: x[\"clpLocationId\"]", "\"organisationName\", \"schools\", \"schoolName\"), *chunks(3, copy_without(students, \"schoolId\", \"schoolName\", \"location\"))) write_relations(generate_relations(organisations, schools,", "structs = [[extract(defn, x) for x in sheet.rows] for defn", "schools] # Build location -> school relations school_keys = {x[\"clpSchoolId\"]:", "time being for x in injected: x[\"clpLocationId\"] = cuid.cuid() return", "'PARENT_ORGANISATION_ID' => 'parentOrganisationId'\"\"\" bits = [(x.lower() if i == 0", "titles, dicts) else: yield Sheet(sheet.title) def to_camel(s): \"\"\"Convert an underscored", "def convert_row_to_dict(titles, row): data = {} for (i, cell) in", "{\"SCHOOL_NAME\": \"NAME\", \"SCH_ELECTORATE\": \"ELECTORATE\", \"SCHOOL_ID\": \"CLP_SCHOOL_ID\", \"ORGANISATION_ID\": \"CLP_ORGANISATION_ID\", \"SCHOOL_S_ADDRESS1\": \"ADDRESS\",", "new location location = uniques.setdefault(x[\"name\"], x) related_schools = location.setdefault(\"schools\", list())", "the \".0\" def prepare_students(students): uniques = unique(\"clpStudentId\", students) injected =", "injected = inject_required(\"ClpStudent\", uniques) for x in injected: x[\"dateOfBirth\"] =", "def extract_from_xlsx(file_path): for sheet in convert_xlsx(file_path): if sheet.name == \"SCHOOL-ORG\":", "# pip install openpyxl # pip install cuid import os.path", "\"SCHOOL_S_STATE\": \"STATE\", \"SCHOOL_S_POSTCODE\": \"POSTCODE\", } LOCATION_FIELDS = {\"LOCATION_NAME\": \"NAME\", \"LOC_ELECTORATE\":", "\"Data container object to hold the contents of one sheet", "x) in enumerate(s.split(\"_\"))] return \"\".join(bits) def relative_to_absolute(relative_path): path_to_py = os.path.abspath(os.path.dirname(__file__))", "unique(\"clpSchoolId\", schools) injected = inject_required(\"ClpSchool\", uniques) return injected def prepare_locations(locations):", "x in injected: x[\"clpLocationId\"] = cuid.cuid() return injected def convert_dob_to_datetime(s):", "main(): xlsx_path = relative_to_absolute(SOURCE_XLSX) raw_collections = extract_from_xlsx(xlsx_path) (organisations, schools, locations,", "\"FNAME\": \"GIVEN_NAMES\", \"TEACHER_LANGUAGES\": \"LANGUAGES\", \"P_ADDRESS1\": \"ADDRESS1\", \"P_ADDRESS2\": \"ADDRESS2\", \"P_SUBURB\": \"SUBURB\",", "__init__(self, name, titles=None, rows=None): self.name = name self.titles = titles", "in rows[1:]] yield Sheet(sheet.title, titles, dicts) else: yield Sheet(sheet.title) def", "= process_sheet(sheet, STUDENT_TITLES, [STUDENT_FIELDS]) else: print(\"Ignoring sheet:\", sheet.name) return (organisations,", "+ \"-relations\" + str(i), \"relations\")) os.makedirs(nodes_dir, exist_ok=True) path = os.path.join(nodes_dir,", "location with the given name, or add the new location", "\"POSTCODE\"} TEACHER_TITLES = [\"TEACHER_ID\", \"ORGANISATION_NAME\", \"SCHOOL_NAME\", \"TEACHER_NAME\", \"TITLE\", \"LNAME\", \"FNAME\",", "same teacher is teaching at. 
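# A minimal usage sketch (illustrative, not executed here): iterating the
# converter yields each sheet with its rows already dict-ified, e.g.
#
#   for sheet in convert_xlsx("CLP_combined.xlsx"):
#       print(sheet.name, sheet.rows[:1])
#   # SCHOOL-ORG [{'ORGANISATION_ID': '42', 'ORGANISATION_NAME': 'Example Org', ...}]
#
# Note that every cell value is str()-ified and empty cells are simply
# absent from the row dict, so downstream extract() assumes its mapped
# columns are always populated.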
def to_camel(s):
    """Convert an underscored title into camel case.
    'PARENT_ORGANISATION_ID' => 'parentOrganisationId'"""
    bits = [(x.lower() if i == 0 else x.title()) for (i, x) in enumerate(s.split("_"))]
    return "".join(bits)


def relative_to_absolute(relative_path):
    path_to_py = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(path_to_py, relative_path)


def extract(fields, row_as_dict):
    data = {}
    for (k, v) in fields.items():
        data[to_camel(v)] = row_as_dict[k]
    return data


def process_sheet(sheet, titles, field_defns):
    if titles != sheet.titles:
        print("Sheet doesn't have expected titles:",
              [(i, x) for (i, x) in enumerate(titles) if x != sheet.titles[i]])
        return []
    structs = [[extract(defn, x) for x in sheet.rows] for defn in field_defns]
    return structs


def unique(key, dicts):
    t = {x[key]: x for x in dicts}
    return t.values()


def now_as_iso8601():
    return datetime.datetime.now().replace(microsecond=0).isoformat() + "Z"


def inject_required(type_name, dicts):
    "Inject the required fields that graphcool import requires"
    for x in dicts:
        x["_typeName"] = type_name
        x["id"] = cuid.cuid()
        x["createdAt"] = x["updatedAt"] = now_as_iso8601()
    return list(dicts)
def prepare_organisations(organisations):
    unique_orgs = unique("clpOrganisationId", organisations)
    fat_orgs = inject_required("ClpOrganisation", unique_orgs)
    return fat_orgs


def prepare_schools(schools):
    uniques = unique("clpSchoolId", schools)
    injected = inject_required("ClpSchool", uniques)
    return injected


def prepare_locations(locations):
    # There are multiple locations, each of which is identical except
    # for being related to a different school.
    # We have to collect all the schools that meet at the same location.
    uniques = {}
    for x in locations:
        # get an existing location with the given name, or add the new location
        location = uniques.setdefault(x["name"], x)
        related_schools = location.setdefault("schools", list())
        related_schools.append(x.pop("clpSchoolId"))
    injected = inject_required("ClpLocation", uniques.values())
    # FIX THIS - the current extract doesn't include the CLP location id :(
    # Make one up for the time being
    for x in injected:
        x["clpLocationId"] = cuid.cuid()
    return injected


def convert_dob_to_datetime(s):
    "Convert the string from 99/MON/YY to an ISO date"
    dt = datetime.datetime.strptime(s, "%d/%b/%y")
    # GraphCool import insists on microseconds, hence the ".0"
    return dt.isoformat() + ".0Z"
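# e.g. convert_dob_to_datetime("05/Mar/02") == "2002-03-05T00:00:00.0Z"
# (%y maps 00-68 to 20xx and 69-99 to 19xx, which matters for dates of birth)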
def prepare_students(students):
    uniques = unique("clpStudentId", students)
    injected = inject_required("ClpStudent", uniques)
    for x in injected:
        x["dateOfBirth"] = convert_dob_to_datetime(x["dateOfBirth"])
    return injected


def prepare_teachers(teachers):
    # Like locations, the same teacher can have multiple records,
    # each of which is identical except for being related to a different school.
    # We have to collect all the schools that the same teacher is teaching at.
    uniques = {}
    for x in teachers:
        # get an existing teacher with that id, or add the new teacher record
        teacher = uniques.setdefault(x["clpTeacherId"], x)
        related_schools = teacher.setdefault("schools", list())
        related_schools.append(x.pop("schoolId"))
    injected = inject_required("ClpTeacher", uniques.values())
    return injected


def extract_from_xlsx(file_path):
    for sheet in convert_xlsx(file_path):
        if sheet.name == "SCHOOL-ORG":
            (organisations, schools, locations) = process_sheet(
                sheet, SCHOOL_TITLES, [ORGANISATION_FIELDS, SCHOOL_FIELDS, LOCATION_FIELDS])
        elif sheet.name == "Teacher":
            (teachers, ) = process_sheet(sheet, TEACHER_TITLES, [TEACHER_FIELDS])
        elif sheet.name == "Student":
            (students, ) = process_sheet(sheet, STUDENT_TITLES, [STUDENT_FIELDS])
        else:
            print("Ignoring sheet:", sheet.name)
    return (organisations, schools, locations, teachers, students)
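# Assumption baked into extract_from_xlsx: the workbook contains all three of
# the SCHOOL-ORG, Teacher and Student sheets. If any of them is missing, the
# corresponding names above are never bound and the final return raises
# NameError.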
def copy_without(dicts, *keys_to_remove):
    "Return an iterable that contains copies of the given dictionaries with all the given keys removed"
    copies = [x.copy() for x in dicts]
    for d in copies:
        for to_remove in keys_to_remove:
            d.pop(to_remove, None)
    return copies


def write_nodes(*list_of_lists):
    for (i, one_list) in enumerate(list_of_lists):
        # writes e.g. ../server/extract0/nodes/1.json, ../server/extract1/nodes/1.json, ...
        nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + str(i), "nodes"))
        os.makedirs(nodes_dir, exist_ok=True)
        path = os.path.join(nodes_dir, "1.json")
        with open(path, "w") as f:
            nodes = {
                "valueType": "nodes",
                "values": one_list
            }
            f.write(json.dumps(nodes))


def write_relations(list_of_lists):
    for (i, one_list) in enumerate(list_of_lists):
        # writes e.g. ../server/extract-relations0/relations/1.json, ...
        nodes_dir = relative_to_absolute(
            os.path.join(EXTRACT_OUTPUT_DIR + "-relations" + str(i), "relations"))
        os.makedirs(nodes_dir, exist_ok=True)
        path = os.path.join(nodes_dir, "1.json")
        with open(path, "w") as f:
            nodes = {
                "valueType": "relations",
                "values": list(one_list)
            }
            f.write(json.dumps(nodes))


def chunks(n, l):
    """Yield n successive similar-sized chunks from l."""
    chunk_size = 1 + len(l) // n
    for i in range(0, len(l), chunk_size):
        yield l[i:i + chunk_size]
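# e.g. chunks(3, [1, 2, 3, 4, 5, 6, 7]) yields [1, 2, 3], [4, 5, 6], [7]
# (chunk_size = 1 + 7 // 3 = 3)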
def prepare(raw_organisations, raw_schools, raw_locations, raw_teachers, raw_students):
    return (
        prepare_organisations(raw_organisations),
        prepare_schools(raw_schools),
        prepare_locations(raw_locations),
        prepare_teachers(raw_teachers),
        prepare_students(raw_students)
    )


def make_relation(entity1, id1, field1, entity2, id2, field2):
    return [
        {"_typeName": entity1, "id": id1, "fieldName": field1},
        {"_typeName": entity2, "id": id2, "fieldName": field2}
    ]
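# One relation names both ends of the link, which is the pair shape the
# graphcool relation import expects, e.g.
#
#   make_relation("ClpOrganisation", "o1", "schools", "ClpSchool", "s1", "organisation")
#   == [{"_typeName": "ClpOrganisation", "id": "o1", "fieldName": "schools"},
#       {"_typeName": "ClpSchool", "id": "s1", "fieldName": "organisation"}]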
LOCATION_FIELDS]) elif sheet.name == \"Teacher\": (teachers, )", "\"1.json\") with open(path, \"w\") as f: nodes = { \"valueType\":", "in teacher.get(\"schools\", [])] # Build student -> school relations yield", "return datetime.datetime.now().replace(microsecond=0).isoformat() + \"Z\" def inject_required(type_name, dicts): \"Inject the required", "that id, or add the new teacher record teacher =", "in dicts} return t.values() def now_as_iso8601(): return datetime.datetime.now().replace(microsecond=0).isoformat() + \"Z\"", "uniques) for x in injected: x[\"dateOfBirth\"] = convert_dob_to_datetime(x[\"dateOfBirth\"]) return injected", "x for x in dicts} return t.values() def now_as_iso8601(): return", "l): \"\"\"Yield n successive similar-sized chunks from l.\"\"\" chunk_size =", "org_keys[x[\"clpOrganisationId\"]], \"schools\", \"ClpSchool\", x[\"id\"], \"organisation\") for x in schools] #", "dictionary with all the given keys removed\" copies = [x.copy()", "injected def extract_from_xlsx(file_path): for sheet in convert_xlsx(file_path): if sheet.name ==", "= os.path.join(nodes_dir, \"1.json\") with open(path, \"w\") as f: nodes =", "school_keys = {x[\"clpSchoolId\"]: x[\"id\"] for x in schools} yield [make_relation(\"ClpLocation\",", "XLSX spreadsheet to iterable of Sheet objects, in which row", "cuid.cuid() return injected def convert_dob_to_datetime(s): \"Convert the string from 99/MON/YY", "multiple records, # each of which is identitical except that", "FIX THIS - Current extract doesn't include the CLP location", "convert_xlsx(xlsx_file): \"\"\"Convert the given XLSX spreadsheet to iterable of Sheet", "uniques = {} for x in locations: # get an", "} SCHOOL_FIELDS = {\"SCHOOL_NAME\": \"NAME\", \"SCH_ELECTORATE\": \"ELECTORATE\", \"SCHOOL_ID\": \"CLP_SCHOOL_ID\", \"ORGANISATION_ID\":", "SCHOOL_TITLES = [\"ORGANISATION_ID\", \"ORGANISATION_NAME\", \"ORG_ELECTORATE\", \"P_ADDRESS1\", \"P_SUBURB\", \"P_STATE\", \"P_POSTCODE\", \"S_ADDRESS1\",", "def now_as_iso8601(): return datetime.datetime.now().replace(microsecond=0).isoformat() + \"Z\" def inject_required(type_name, dicts): \"Inject", "doesn't include the CLP location id :( Make one up", "{x[\"clpSchoolId\"]: x[\"id\"] for x in schools} yield [make_relation(\"ClpLocation\", location[\"id\"], \"schools\",", "in enumerate(list_of_lists): nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + str(i), \"nodes\")) os.makedirs(nodes_dir, exist_ok=True)", "\"SCHOOL_NAME\", \"SCH_ELECTORATE\", \"SCHOOL_ID\", \"SCHOOL_P_ADDRESS1\", \"SCHOOL_P_SUBURB\", \"SCHOOL_P_STATE\", \"SCHOOL_P_POSTCODE\", \"SCHOOL_S_ADDRESS1\", \"SCHOOL_S_SUBURB\", \"SCHOOL_S_STATE\",", "student -> school relations yield [make_relation(\"ClpStudent\", student[\"id\"], \"school\", \"ClpSchool\", school_keys[student[\"schoolId\"]],", "\"TELEPHONE\": \"DAY_PHONE\", \"TEL_EVENING\": \"EVENING_PHONE\", \"EMAIL\": \"EMAIL\", \"MOBILE\": \"MOBILE\", \"LEVEL_TAUGHT\": \"LEVEL_TAUGHT\",", "yield Sheet(sheet.title) def to_camel(s): \"\"\"Convert an underscored title into camel", "uniques.values()) return injected def extract_from_xlsx(file_path): for sheet in convert_xlsx(file_path): if", "} f.write(json.dumps(nodes)) def chunks(n, l): \"\"\"Yield n successive similar-sized chunks", "for x in organisations} yield [make_relation(\"ClpOrganisation\", org_keys[x[\"clpOrganisationId\"]], \"schools\", \"ClpSchool\", x[\"id\"],", "import required\" for x in dicts: x[\"_typeName\"] = type_name x[\"id\"]", "load_workbook import cuid # https://github.com/necaris/cuid.py - create 
uuid's in the", "\"ClpSchool\", school_keys[student[\"schoolId\"]], \"students\") for student in students if student[\"schoolId\"] in", "def prepare_schools(schools): uniques = unique(\"clpSchoolId\", schools) injected = inject_required(\"ClpSchool\", uniques)", "{ \"valueType\": \"nodes\", \"values\": one_list } f.write(json.dumps(nodes)) def write_relations(list_of_lists): for", "schools that the same teacher is teaching at. uniques =", "one_list } f.write(json.dumps(nodes)) def write_relations(list_of_lists): for (i, one_list) in enumerate(list_of_lists):", "add the new teacher record teacher = uniques.setdefault(x[\"clpTeacherId\"], x) related_schools", "= row_as_dict[k] return data def process_sheet(sheet, titles, field_defns): if titles", "= inject_required(\"ClpLocation\", uniques.values()) # FIX THIS - Current extract doesn't", "\"DEGREE_COUNTRY\": \"EDUCATION_COUNTRY\", \"DEGREE_YEAR\": \"EDUCATION_YEAR\", \"ORGANISATION_ID\": \"ORGANISATION_ID\", \"SCHOOL_ID\": \"SCHOOL_ID\", } STUDENT_FIELDS", "\"ADDRESS2\", \"P_SUBURB\": \"SUBURB\", \"P_STATE\": \"STATE\", \"P_POSTCODE\": \"POSTCODE\", \"TELEPHONE\": \"DAY_PHONE\", \"TEL_EVENING\":", "[make_relation(\"ClpOrganisation\", org_keys[x[\"clpOrganisationId\"]], \"schools\", \"ClpSchool\", x[\"id\"], \"organisation\") for x in schools]", "yield [make_relation(\"ClpTeacher\", teacher[\"id\"], \"schools\", \"ClpSchool\", school_keys[schoolId], \"teachers\") for teacher in", "yield [make_relation(\"ClpOrganisation\", org_keys[x[\"clpOrganisationId\"]], \"schools\", \"ClpSchool\", x[\"id\"], \"organisation\") for x in", "rows = [x for x in sheet.iter_rows()] if rows: titles", "extract(fields, row_as_dict): data = {} for (k, v) in fields.items():", "return structs def unique(key, dicts): t = {x[key]: x for", "or [] def convert_row_to_dict(titles, row): data = {} for (i,", "microseconds, hence the \".0\" def prepare_students(students): uniques = unique(\"clpStudentId\", students)", "locations) = process_sheet( sheet, SCHOOL_TITLES, [ORGANISATION_FIELDS, SCHOOL_FIELDS, LOCATION_FIELDS]) elif sheet.name", "the format that graphcool expects SOURCE_XLSX = \"./data/CLP_combined.xlsx\" EXTRACT_OUTPUT_DIR =", "include the CLP location id :( Make one up for", "rows or [] def convert_row_to_dict(titles, row): data = {} for", "copy_without(schools, \"clpOrganisationId\"), copy_without(locations, \"schools\"), copy_without(teachers, \"organisationId\", \"organisationName\", \"schools\", \"schoolName\"), *chunks(3,", "titles=None, rows=None): self.name = name self.titles = titles or []", "= type_name x[\"id\"] = cuid.cuid() x[\"createdAt\"] = x[\"updatedAt\"] = now_as_iso8601()", "\"ORGANISATION_NAME\", \"ORG_ELECTORATE\", \"P_ADDRESS1\", \"P_SUBURB\", \"P_STATE\", \"P_POSTCODE\", \"S_ADDRESS1\", \"S_SUBURB\", \"S_STATE\", \"S_POSTCODE\",", "\"ELECTORATE\", \"SCHOOL_ID\": \"CLP_SCHOOL_ID\", \"LOC_S_ADDRESS1\": \"ADDRESS\", \"LOC_S_SUBURB\": \"SUBURB\", \"LOC_S_STATE\": \"STATE\", \"LOC_S_POSTCODE\":", "title into camel case. 
'PARENT_ORGANISATION_ID' => 'parentOrganisationId'\"\"\" bits = [(x.lower()", "[(x.lower() if i == 0 else x.title()) for (i, x)", "fat_orgs = inject_required(\"ClpOrganisation\", unique_orgs) return fat_orgs def prepare_schools(schools): uniques =", "= relative_to_absolute(SOURCE_XLSX) raw_collections = extract_from_xlsx(xlsx_path) (organisations, schools, locations, teachers, students)", "EXTRACT_OUTPUT_DIR = \"../server/extract\" SCHOOL_TITLES = [\"ORGANISATION_ID\", \"ORGANISATION_NAME\", \"ORG_ELECTORATE\", \"P_ADDRESS1\", \"P_SUBURB\",", "keys_to_remove: d.pop(to_remove, None) return copies def write_nodes(*list_of_lists): for (i, one_list)", "organisations) fat_orgs = inject_required(\"ClpOrganisation\", unique_orgs) return fat_orgs def prepare_schools(schools): uniques", "x in dicts: x[\"_typeName\"] = type_name x[\"id\"] = cuid.cuid() x[\"createdAt\"]", "\"ClpSchool\", school_keys[schoolId], \"locations\") for location in locations for schoolId in", "\"Convert the string from 99/MON/YY to a ISO date\" dt", "x != sheet.titles[i]]) return [] structs = [[extract(defn, x) for", "except that for being related to a different school. #", "path = os.path.join(nodes_dir, \"1.json\") with open(path, \"w\") as f: nodes", "{} for (k, v) in fields.items(): data[to_camel(v)] = row_as_dict[k] return", "existing location with the given name, or add the new", "# pip install cuid import os.path import json import datetime", "that contains copies of the given dictionary with all the", "different school. # We have to collect all the schools", "hence the \".0\" def prepare_students(students): uniques = unique(\"clpStudentId\", students) injected", "enumerate(row): if cell.Value is not None: data[titles[i]] = str(cell.value) return", "\"S_SUBURB\", \"S_STATE\", \"S_POSTCODE\", \"SCHOOL_NAME\", \"SCH_ELECTORATE\", \"SCHOOL_ID\", \"SCHOOL_P_ADDRESS1\", \"SCHOOL_P_SUBURB\", \"SCHOOL_P_STATE\", \"SCHOOL_P_POSTCODE\",", "a ISO date\" dt = datetime.datetime.strptime(s, \"%d/%b/%y\") return dt.isoformat() +", "of the given dictionary with all the given keys removed\"", "\"SCHOOL_S_ADDRESS1\": \"ADDRESS\", \"SCHOOL_S_SUBURB\": \"SUBURB\", \"SCHOOL_S_STATE\": \"STATE\", \"SCHOOL_S_POSTCODE\": \"POSTCODE\", } LOCATION_FIELDS", "= [\"TEACHER_ID\", \"ORGANISATION_NAME\", \"SCHOOL_NAME\", \"TEACHER_NAME\", \"TITLE\", \"LNAME\", \"FNAME\", \"TEACHER_LANGUAGES\", \"P_ADDRESS1\",", "\"LNAME\", \"FNAME\", \"TEACHER_LANGUAGES\", \"P_ADDRESS1\", \"P_ADDRESS2\", \"P_SUBURB\", \"P_STATE\", \"P_POSTCODE\", \"TELEPHONE\", \"TEL_EVENING\",", "\"ORGANISATION_NAME\": \"ORGANISATION_NAME\", \"SCHOOL_NAME\": \"SCHOOL_NAME\", \"TITLE\": \"TITLE\", \"LNAME\": \"FAMILY_NAME\", \"FNAME\": \"GIVEN_NAMES\",", "dicts): t = {x[key]: x for x in dicts} return", "inject_required(\"ClpOrganisation\", unique_orgs) return fat_orgs def prepare_schools(schools): uniques = unique(\"clpSchoolId\", schools)", "unique(\"clpStudentId\", students) injected = inject_required(\"ClpStudent\", uniques) for x in injected:", "prepare_teachers(teachers): # Like locations, the same teacher can have multiple", "in schools} yield [make_relation(\"ClpLocation\", location[\"id\"], \"schools\", \"ClpSchool\", school_keys[schoolId], \"locations\") for", "\"LOC_S_POSTCODE\"] ORGANISATION_FIELDS = {\"ORGANISATION_ID\": \"CLP_ORGANISATION_ID\", \"ORGANISATION_NAME\": \"NAME\", \"ORG_ELECTORATE\": \"ELECTORATE\", \"S_ADDRESS1\":", "teacher -> school relations yield [make_relation(\"ClpTeacher\", teacher[\"id\"], \"schools\", \"ClpSchool\", school_keys[schoolId],", 
"teacher in teachers for schoolId in teacher.get(\"schools\", [])] # Build", "\"LOC_S_STATE\": \"STATE\", \"LOC_S_POSTCODE\": \"POSTCODE\"} TEACHER_TITLES = [\"TEACHER_ID\", \"ORGANISATION_NAME\", \"SCHOOL_NAME\", \"TEACHER_NAME\",", "in range(0, len(l), chunk_size): yield l[i:i + chunk_size] def prepare(raw_organisations,", "openpyxl import load_workbook import cuid # https://github.com/necaris/cuid.py - create uuid's", "\"SUBURB\", \"S_STATE\": \"STATE\", \"S_POSTCODE\": \"POSTCODE\", } SCHOOL_FIELDS = {\"SCHOOL_NAME\": \"NAME\",", "objects, in which row has been converted into a dictionary\"\"\"", "for schoolId in location.get(\"schools\", [])] # Build teacher -> school", "\"Z\" def inject_required(type_name, dicts): \"Inject the required fields that graphcool", "= now_as_iso8601() return list(dicts) def prepare_organisations(organisations): unique_orgs = unique(\"clpOrganisationId\", organisations)", "\"STATE\", \"LOC_S_POSTCODE\": \"POSTCODE\"} TEACHER_TITLES = [\"TEACHER_ID\", \"ORGANISATION_NAME\", \"SCHOOL_NAME\", \"TEACHER_NAME\", \"TITLE\",", "now_as_iso8601(): return datetime.datetime.now().replace(microsecond=0).isoformat() + \"Z\" def inject_required(type_name, dicts): \"Inject the", "which row has been converted into a dictionary\"\"\" work_book =", "x) related_schools = teacher.setdefault(\"schools\", list()) related_schools.append(x.pop(\"schoolId\")) injected = inject_required(\"ClpTeacher\", uniques.values())", "\"relations\")) os.makedirs(nodes_dir, exist_ok=True) path = os.path.join(nodes_dir, \"1.json\") with open(path, \"w\")", "- Current extract doesn't include the CLP location id :(", "\"GIVEN_NAMES\", \"DOB\": \"DATE_OF_BIRTH\", \"TEL\": \"PHONE\", \"LOCATION_NAME_1\": \"DAY_SCHOOL\", } class Sheet:", "{\"_typeName\": entity2, \"id\": id2, \"fieldName\": field2} ] def generate_relations(organisations, schools,", "{} for x in locations: # get an existing location", "[\"TEACHER_ID\", \"ORGANISATION_NAME\", \"SCHOOL_NAME\", \"TEACHER_NAME\", \"TITLE\", \"LNAME\", \"FNAME\", \"TEACHER_LANGUAGES\", \"P_ADDRESS1\", \"P_ADDRESS2\",", "yield [make_relation(\"ClpStudent\", student[\"id\"], \"school\", \"ClpSchool\", school_keys[student[\"schoolId\"]], \"students\") for student in", "\"P_ADDRESS1\": \"ADDRESS1\", \"P_ADDRESS2\": \"ADDRESS2\", \"P_SUBURB\": \"SUBURB\", \"P_STATE\": \"STATE\", \"P_POSTCODE\": \"POSTCODE\",", "= process_sheet(sheet, TEACHER_TITLES, [TEACHER_FIELDS]) elif sheet.name == \"Student\": (students, )", "\"TITLE\": \"TITLE\", \"LNAME\": \"FAMILY_NAME\", \"FNAME\": \"GIVEN_NAMES\", \"TEACHER_LANGUAGES\": \"LANGUAGES\", \"P_ADDRESS1\": \"ADDRESS1\",", "\"TITLE\", \"LNAME\", \"FNAME\", \"TEACHER_LANGUAGES\", \"P_ADDRESS1\", \"P_ADDRESS2\", \"P_SUBURB\", \"P_STATE\", \"P_POSTCODE\", \"TELEPHONE\",", "field2): return [ {\"_typeName\": entity1, \"id\": id1, \"fieldName\": field1}, {\"_typeName\":", "\"FIELD_OF_EDUCATION\": \"EDUCATION_FIELD\", \"DEGREE_COUNTRY\": \"EDUCATION_COUNTRY\", \"DEGREE_YEAR\": \"EDUCATION_YEAR\", \"ORGANISATION_ID\": \"ORGANISATION_ID\", \"SCHOOL_ID\": \"SCHOOL_ID\",", "import json import datetime from openpyxl import load_workbook import cuid", "= unique(\"clpSchoolId\", schools) injected = inject_required(\"ClpSchool\", uniques) return injected def", "Build student -> school relations yield [make_relation(\"ClpStudent\", student[\"id\"], \"school\", \"ClpSchool\",", "nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + \"-relations\" + str(i), \"relations\")) os.makedirs(nodes_dir, exist_ok=True)", "for location in 
locations for schoolId in location.get(\"schools\", [])] #", "x in schools] # Build location -> school relations school_keys", "titles = [cell.value for cell in rows[0]] dicts = [convert_row_to_dict(titles,", "prepare_schools(raw_schools), prepare_locations(raw_locations), prepare_teachers(raw_teachers), prepare_students(raw_students) ) def make_relation(entity1, id1, field1, entity2,", "organisations, copy_without(schools, \"clpOrganisationId\"), copy_without(locations, \"schools\"), copy_without(teachers, \"organisationId\", \"organisationName\", \"schools\", \"schoolName\"),", "inject_required(\"ClpLocation\", uniques.values()) # FIX THIS - Current extract doesn't include", "def extract(fields, row_as_dict): data = {} for (k, v) in", "x in injected: x[\"dateOfBirth\"] = convert_dob_to_datetime(x[\"dateOfBirth\"]) return injected def prepare_teachers(teachers):", "\"P_SUBURB\", \"P_STATE\", \"P_POSTCODE\", \"TELEPHONE\", \"TEL_EVENING\", \"EMAIL\", \"MOBILE\", \"LEVEL_TAUGHT\", \"LEVEL_OF_EDUCATION\", \"FIELD_OF_EDUCATION\",", "contains copies of the given dictionary with all the given", "\"STATE\", \"S_POSTCODE\": \"POSTCODE\", } SCHOOL_FIELDS = {\"SCHOOL_NAME\": \"NAME\", \"SCH_ELECTORATE\": \"ELECTORATE\",", "the schools that the same teacher is teaching at. uniques", "[\"SCHOOL_NAME\", \"SCHOOL_ID\", \"STUDENT_ID\", \"STUDENT_SRN\", \"LOCATION_NAME\", \"STUDENT_LNAME\", \"STUDENT_FNAME\", \"DOB\", \"TEL\", \"LOCATION_NAME_1\"]", "[] def convert_row_to_dict(titles, row): data = {} for (i, cell)", "data = {} for (k, v) in fields.items(): data[to_camel(v)] =", "all the schools that the same teacher is teaching at.", "location[\"id\"], \"schools\", \"ClpSchool\", school_keys[schoolId], \"locations\") for location in locations for", "= {\"SCHOOL_NAME\": \"NAME\", \"SCH_ELECTORATE\": \"ELECTORATE\", \"SCHOOL_ID\": \"CLP_SCHOOL_ID\", \"ORGANISATION_ID\": \"CLP_ORGANISATION_ID\", \"SCHOOL_S_ADDRESS1\":", "\"PHONE\", \"LOCATION_NAME_1\": \"DAY_SCHOOL\", } class Sheet: \"Data container object to", "relative_to_absolute(relative_path): path_to_py = os.path.abspath(os.path.dirname(__file__)) return os.path.join(path_to_py, relative_path) def extract(fields, row_as_dict):", "= {} for (k, v) in fields.items(): data[to_camel(v)] = row_as_dict[k]", "for x in injected: x[\"clpLocationId\"] = cuid.cuid() return injected def", "\"FIELD_OF_EDUCATION\", \"DEGREE_COUNTRY\", \"DEGREE_YEAR\", \"ORGANISATION_ID\", \"SCHOOL_ID\"] STUDENT_TITLES = [\"SCHOOL_NAME\", \"SCHOOL_ID\", \"STUDENT_ID\",", "= {} for x in locations: # get an existing", "uniques.setdefault(x[\"clpTeacherId\"], x) related_schools = teacher.setdefault(\"schools\", list()) related_schools.append(x.pop(\"schoolId\")) injected = inject_required(\"ClpTeacher\",", "in dicts] for d in copies: for to_remove in keys_to_remove:", "i in range(0, len(l), chunk_size): yield l[i:i + chunk_size] def", "the contents of one sheet within an excel spreadsheet\" def", "uniques) return injected def prepare_locations(locations): # There are multiple locations,", "\"../server/extract\" SCHOOL_TITLES = [\"ORGANISATION_ID\", \"ORGANISATION_NAME\", \"ORG_ELECTORATE\", \"P_ADDRESS1\", \"P_SUBURB\", \"P_STATE\", \"P_POSTCODE\",", "(organisations, schools, locations, teachers, students) = prepare(*raw_collections) write_nodes( organisations, copy_without(schools,", "\"EDUCATION_COUNTRY\", \"DEGREE_YEAR\": \"EDUCATION_YEAR\", \"ORGANISATION_ID\": \"ORGANISATION_ID\", \"SCHOOL_ID\": \"SCHOOL_ID\", } STUDENT_FIELDS =", "return copies def write_nodes(*list_of_lists): for 
(i, one_list) in enumerate(list_of_lists): nodes_dir", "\"STATE\", \"SCHOOL_S_POSTCODE\": \"POSTCODE\", } LOCATION_FIELDS = {\"LOCATION_NAME\": \"NAME\", \"LOC_ELECTORATE\": \"ELECTORATE\",", "or [] self.rows = rows or [] def convert_row_to_dict(titles, row):", "write_nodes( organisations, copy_without(schools, \"clpOrganisationId\"), copy_without(locations, \"schools\"), copy_without(teachers, \"organisationId\", \"organisationName\", \"schools\",", "\"SCHOOL_S_POSTCODE\": \"POSTCODE\", } LOCATION_FIELDS = {\"LOCATION_NAME\": \"NAME\", \"LOC_ELECTORATE\": \"ELECTORATE\", \"SCHOOL_ID\":", "in teachers for schoolId in teacher.get(\"schools\", [])] # Build student", "\"fieldName\": field2} ] def generate_relations(organisations, schools, locations, teachers, students): #", "\"P_POSTCODE\", \"S_ADDRESS1\", \"S_SUBURB\", \"S_STATE\", \"S_POSTCODE\", \"SCHOOL_NAME\", \"SCH_ELECTORATE\", \"SCHOOL_ID\", \"SCHOOL_P_ADDRESS1\", \"SCHOOL_P_SUBURB\",", "datetime.datetime.strptime(s, \"%d/%b/%y\") return dt.isoformat() + \".0Z\" # GraphCool import insists", "\"STUDENT_ID\": \"CLP_STUDENT_ID\", \"STUDENT_SRN\": \"SRN\", \"LOCATION_NAME\": \"LOCATION\", \"STUDENT_LNAME\": \"FAMILY_NAME\", \"STUDENT_FNAME\": \"GIVEN_NAMES\",", "n for i in range(0, len(l), chunk_size): yield l[i:i +", "student[\"schoolId\"] in school_keys] def main(): xlsx_path = relative_to_absolute(SOURCE_XLSX) raw_collections =", "\"SCHOOL_S_SUBURB\": \"SUBURB\", \"SCHOOL_S_STATE\": \"STATE\", \"SCHOOL_S_POSTCODE\": \"POSTCODE\", } LOCATION_FIELDS = {\"LOCATION_NAME\":", "\"STUDENT_SRN\", \"LOCATION_NAME\", \"STUDENT_LNAME\", \"STUDENT_FNAME\", \"DOB\", \"TEL\", \"LOCATION_NAME_1\"] TEACHER_FIELDS = {\"TEACHER_ID\":", "an existing location with the given name, or add the", "\"LEVEL_TAUGHT\": \"LEVEL_TAUGHT\", \"LEVEL_OF_EDUCATION\": \"EDUCATION_LEVEL\", \"FIELD_OF_EDUCATION\": \"EDUCATION_FIELD\", \"DEGREE_COUNTRY\": \"EDUCATION_COUNTRY\", \"DEGREE_YEAR\": \"EDUCATION_YEAR\",", ") = process_sheet(sheet, TEACHER_TITLES, [TEACHER_FIELDS]) elif sheet.name == \"Student\": (students,", "\"ORGANISATION_ID\", \"SCHOOL_ID\"] STUDENT_TITLES = [\"SCHOOL_NAME\", \"SCHOOL_ID\", \"STUDENT_ID\", \"STUDENT_SRN\", \"LOCATION_NAME\", \"STUDENT_LNAME\",", "= load_workbook(filename=xlsx_file, read_only=True, data_only=True) for sheet in work_book: rows =", "bits = [(x.lower() if i == 0 else x.title()) for", "insists on microseconds, hence the \".0\" def prepare_students(students): uniques =", "json import datetime from openpyxl import load_workbook import cuid #", "\"organisationId\", \"organisationName\", \"schools\", \"schoolName\"), *chunks(3, copy_without(students, \"schoolId\", \"schoolName\", \"location\"))) write_relations(generate_relations(organisations,", "datetime.datetime.now().replace(microsecond=0).isoformat() + \"Z\" def inject_required(type_name, dicts): \"Inject the required fields", "= uniques.setdefault(x[\"clpTeacherId\"], x) related_schools = teacher.setdefault(\"schools\", list()) related_schools.append(x.pop(\"schoolId\")) injected =", "teacher = uniques.setdefault(x[\"clpTeacherId\"], x) related_schools = teacher.setdefault(\"schools\", list()) related_schools.append(x.pop(\"schoolId\")) injected", "-> school relations school_keys = {x[\"clpSchoolId\"]: x[\"id\"] for x in", "teachers, students): # Build school -> organisation relations org_keys =", ") = process_sheet(sheet, STUDENT_TITLES, [STUDENT_FIELDS]) else: print(\"Ignoring sheet:\", sheet.name) return", "locations, the same teacher can have multiple records, # each", 
"\"fieldName\": field1}, {\"_typeName\": entity2, \"id\": id2, \"fieldName\": field2} ] def", "rows[0]] dicts = [convert_row_to_dict(titles, row) for row in rows[1:]] yield", "defn in field_defns] return structs def unique(key, dicts): t =", "copy_without(locations, \"schools\"), copy_without(teachers, \"organisationId\", \"organisationName\", \"schools\", \"schoolName\"), *chunks(3, copy_without(students, \"schoolId\",", "work_book = load_workbook(filename=xlsx_file, read_only=True, data_only=True) for sheet in work_book: rows", "== 0 else x.title()) for (i, x) in enumerate(s.split(\"_\"))] return", "= inject_required(\"ClpSchool\", uniques) return injected def prepare_locations(locations): # There are", "related_schools = location.setdefault(\"schools\", list()) related_schools.append(x.pop(\"clpSchoolId\")) injected = inject_required(\"ClpLocation\", uniques.values()) #", "\"SCHOOL_S_STATE\", \"SCHOOL_S_POSTCODE\", \"LOCATION_NAME\", \"LOC_ELECTORATE\", \"LOC_S_ADDRESS1\", \"LOC_S_SUBURB\", \"LOC_S_STATE\", \"LOC_S_POSTCODE\"] ORGANISATION_FIELDS =", "x[\"clpLocationId\"] = cuid.cuid() return injected def convert_dob_to_datetime(s): \"Convert the string", "titles:\", [(i, x) for (i, x) in enumerate(titles) if x", "injected def convert_dob_to_datetime(s): \"Convert the string from 99/MON/YY to a", "is teaching at. uniques = {} for x in teachers:", "chunk_size] def prepare(raw_organisations, raw_schools, raw_locations, raw_teachers, raw_students): return ( prepare_organisations(raw_organisations),", "expects SOURCE_XLSX = \"./data/CLP_combined.xlsx\" EXTRACT_OUTPUT_DIR = \"../server/extract\" SCHOOL_TITLES = [\"ORGANISATION_ID\",", "-> school relations yield [make_relation(\"ClpStudent\", student[\"id\"], \"school\", \"ClpSchool\", school_keys[student[\"schoolId\"]], \"students\")", "# We have to collect all the schools that the", "99/MON/YY to a ISO date\" dt = datetime.datetime.strptime(s, \"%d/%b/%y\") return", "= extract_from_xlsx(xlsx_path) (organisations, schools, locations, teachers, students) = prepare(*raw_collections) write_nodes(", "\"SCHOOL_ID\", \"STUDENT_ID\": \"CLP_STUDENT_ID\", \"STUDENT_SRN\": \"SRN\", \"LOCATION_NAME\": \"LOCATION\", \"STUDENT_LNAME\": \"FAMILY_NAME\", \"STUDENT_FNAME\":", "= datetime.datetime.strptime(s, \"%d/%b/%y\") return dt.isoformat() + \".0Z\" # GraphCool import", "convert_dob_to_datetime(x[\"dateOfBirth\"]) return injected def prepare_teachers(teachers): # Like locations, the same", "student[\"id\"], \"school\", \"ClpSchool\", school_keys[student[\"schoolId\"]], \"students\") for student in students if", "\"LOCATION_NAME_1\": \"DAY_SCHOOL\", } class Sheet: \"Data container object to hold", "object to hold the contents of one sheet within an", "injected: x[\"dateOfBirth\"] = convert_dob_to_datetime(x[\"dateOfBirth\"]) return injected def prepare_teachers(teachers): # Like", "THIS - Current extract doesn't include the CLP location id", "read_only=True, data_only=True) for sheet in work_book: rows = [x for", "\"Teacher\": (teachers, ) = process_sheet(sheet, TEACHER_TITLES, [TEACHER_FIELDS]) elif sheet.name ==", "= cuid.cuid() x[\"createdAt\"] = x[\"updatedAt\"] = now_as_iso8601() return list(dicts) def", "the given XLSX spreadsheet to iterable of Sheet objects, in", "for x in dicts: x[\"_typeName\"] = type_name x[\"id\"] = cuid.cuid()", "spreadsheet\" def __init__(self, name, titles=None, rows=None): self.name = name self.titles", "fields.items(): data[to_camel(v)] = row_as_dict[k] return data def process_sheet(sheet, titles, field_defns):", "= 
inject_required(\"ClpTeacher\", uniques.values()) return injected def extract_from_xlsx(file_path): for sheet in", "raw_locations, raw_teachers, raw_students): return ( prepare_organisations(raw_organisations), prepare_schools(raw_schools), prepare_locations(raw_locations), prepare_teachers(raw_teachers), prepare_students(raw_students)", "[make_relation(\"ClpLocation\", location[\"id\"], \"schools\", \"ClpSchool\", school_keys[schoolId], \"locations\") for location in locations", "sheet.titles[i]]) return [] structs = [[extract(defn, x) for x in", "records, # each of which is identitical except that for", "uniques.setdefault(x[\"name\"], x) related_schools = location.setdefault(\"schools\", list()) related_schools.append(x.pop(\"clpSchoolId\")) injected = inject_required(\"ClpLocation\",", "students) def copy_without(dicts, *keys_to_remove): \"Return iterable that contains copies of", "+ \".0Z\" # GraphCool import insists on microseconds, hence the", "teacher with that id, or add the new teacher record", "= inject_required(\"ClpOrganisation\", unique_orgs) return fat_orgs def prepare_schools(schools): uniques = unique(\"clpSchoolId\",", "[TEACHER_FIELDS]) elif sheet.name == \"Student\": (students, ) = process_sheet(sheet, STUDENT_TITLES,", "required\" for x in dicts: x[\"_typeName\"] = type_name x[\"id\"] =", "os.path import json import datetime from openpyxl import load_workbook import", "relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + \"-relations\" + str(i), \"relations\")) os.makedirs(nodes_dir, exist_ok=True) path =", "(i, x) in enumerate(titles) if x != sheet.titles[i]]) return []", "for sheet in work_book: rows = [x for x in", "f.write(json.dumps(nodes)) def write_relations(list_of_lists): for (i, one_list) in enumerate(list_of_lists): nodes_dir =", "\"STUDENT_FNAME\", \"DOB\", \"TEL\", \"LOCATION_NAME_1\"] TEACHER_FIELDS = {\"TEACHER_ID\": \"CLP_TEACHER_ID\", \"ORGANISATION_NAME\": \"ORGANISATION_NAME\",", "from openpyxl import load_workbook import cuid # https://github.com/necaris/cuid.py - create", "\"P_STATE\", \"P_POSTCODE\", \"TELEPHONE\", \"TEL_EVENING\", \"EMAIL\", \"MOBILE\", \"LEVEL_TAUGHT\", \"LEVEL_OF_EDUCATION\", \"FIELD_OF_EDUCATION\", \"DEGREE_COUNTRY\",", "str(i), \"relations\")) os.makedirs(nodes_dir, exist_ok=True) path = os.path.join(nodes_dir, \"1.json\") with open(path,", "chunk_size): yield l[i:i + chunk_size] def prepare(raw_organisations, raw_schools, raw_locations, raw_teachers,", "self.name = name self.titles = titles or [] self.rows =", "\"schoolName\"), *chunks(3, copy_without(students, \"schoolId\", \"schoolName\", \"location\"))) write_relations(generate_relations(organisations, schools, locations, teachers,", "container object to hold the contents of one sheet within", "one sheet within an excel spreadsheet\" def __init__(self, name, titles=None,", "-> organisation relations org_keys = {x[\"clpOrganisationId\"]: x[\"id\"] for x in", "given dictionary with all the given keys removed\" copies =", "\"LOC_ELECTORATE\": \"ELECTORATE\", \"SCHOOL_ID\": \"CLP_SCHOOL_ID\", \"LOC_S_ADDRESS1\": \"ADDRESS\", \"LOC_S_SUBURB\": \"SUBURB\", \"LOC_S_STATE\": \"STATE\",", "\"ORGANISATION_NAME\": \"NAME\", \"ORG_ELECTORATE\": \"ELECTORATE\", \"S_ADDRESS1\": \"ADDRESS\", \"S_SUBURB\": \"SUBURB\", \"S_STATE\": \"STATE\",", "row): data = {} for (i, cell) in enumerate(row): if", "row) for row in rows[1:]] yield Sheet(sheet.title, titles, dicts) else:", "id, or add the new teacher record teacher = uniques.setdefault(x[\"clpTeacherId\"],", "ORGANISATION_FIELDS = 
{\"ORGANISATION_ID\": \"CLP_ORGANISATION_ID\", \"ORGANISATION_NAME\": \"NAME\", \"ORG_ELECTORATE\": \"ELECTORATE\", \"S_ADDRESS1\": \"ADDRESS\",", "\"SCHOOL_S_SUBURB\", \"SCHOOL_S_STATE\", \"SCHOOL_S_POSTCODE\", \"LOCATION_NAME\", \"LOC_ELECTORATE\", \"LOC_S_ADDRESS1\", \"LOC_S_SUBURB\", \"LOC_S_STATE\", \"LOC_S_POSTCODE\"] ORGANISATION_FIELDS", "\"NAME\", \"SCH_ELECTORATE\": \"ELECTORATE\", \"SCHOOL_ID\": \"CLP_SCHOOL_ID\", \"ORGANISATION_ID\": \"CLP_ORGANISATION_ID\", \"SCHOOL_S_ADDRESS1\": \"ADDRESS\", \"SCHOOL_S_SUBURB\":", "dt.isoformat() + \".0Z\" # GraphCool import insists on microseconds, hence", "exist_ok=True) path = os.path.join(nodes_dir, \"1.json\") with open(path, \"w\") as f:", "'parentOrganisationId'\"\"\" bits = [(x.lower() if i == 0 else x.title())", "= [[extract(defn, x) for x in sheet.rows] for defn in", "uniques = {} for x in teachers: # get an", "[(i, x) for (i, x) in enumerate(titles) if x !=", "CLP location id :( Make one up for the time", "field1}, {\"_typeName\": entity2, \"id\": id2, \"fieldName\": field2} ] def generate_relations(organisations,", "= cuid.cuid() return injected def convert_dob_to_datetime(s): \"Convert the string from", "=> 'parentOrganisationId'\"\"\" bits = [(x.lower() if i == 0 else", "def main(): xlsx_path = relative_to_absolute(SOURCE_XLSX) raw_collections = extract_from_xlsx(xlsx_path) (organisations, schools,", "\"SCHOOL_ID\": \"SCHOOL_ID\", } STUDENT_FIELDS = {\"SCHOOL_NAME\": \"SCHOOL_NAME\", \"SCHOOL_ID\": \"SCHOOL_ID\", \"STUDENT_ID\":", "cell.Value is not None: data[titles[i]] = str(cell.value) return data def", "enumerate(s.split(\"_\"))] return \"\".join(bits) def relative_to_absolute(relative_path): path_to_py = os.path.abspath(os.path.dirname(__file__)) return os.path.join(path_to_py,", "\"SCHOOL_P_ADDRESS1\", \"SCHOOL_P_SUBURB\", \"SCHOOL_P_STATE\", \"SCHOOL_P_POSTCODE\", \"SCHOOL_S_ADDRESS1\", \"SCHOOL_S_SUBURB\", \"SCHOOL_S_STATE\", \"SCHOOL_S_POSTCODE\", \"LOCATION_NAME\", \"LOC_ELECTORATE\",", "raw_teachers, raw_students): return ( prepare_organisations(raw_organisations), prepare_schools(raw_schools), prepare_locations(raw_locations), prepare_teachers(raw_teachers), prepare_students(raw_students) )", "schools, locations, teachers, students) = prepare(*raw_collections) write_nodes( organisations, copy_without(schools, \"clpOrganisationId\"),", "enumerate(list_of_lists): nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + \"-relations\" + str(i), \"relations\")) os.makedirs(nodes_dir,", "SOURCE_XLSX = \"./data/CLP_combined.xlsx\" EXTRACT_OUTPUT_DIR = \"../server/extract\" SCHOOL_TITLES = [\"ORGANISATION_ID\", \"ORGANISATION_NAME\",", "\"-relations\" + str(i), \"relations\")) os.makedirs(nodes_dir, exist_ok=True) path = os.path.join(nodes_dir, \"1.json\")", "convert_row_to_dict(titles, row): data = {} for (i, cell) in enumerate(row):", "\"SCHOOL_NAME\", \"SCHOOL_ID\": \"SCHOOL_ID\", \"STUDENT_ID\": \"CLP_STUDENT_ID\", \"STUDENT_SRN\": \"SRN\", \"LOCATION_NAME\": \"LOCATION\", \"STUDENT_LNAME\":", "in rows[0]] dicts = [convert_row_to_dict(titles, row) for row in rows[1:]]", "list()) related_schools.append(x.pop(\"clpSchoolId\")) injected = inject_required(\"ClpLocation\", uniques.values()) # FIX THIS -", "# Build student -> school relations yield [make_relation(\"ClpStudent\", student[\"id\"], \"school\",", "\"SCHOOL_ID\": \"SCHOOL_ID\", \"STUDENT_ID\": \"CLP_STUDENT_ID\", \"STUDENT_SRN\": \"SRN\", \"LOCATION_NAME\": \"LOCATION\", \"STUDENT_LNAME\": \"FAMILY_NAME\",", "return data def convert_xlsx(xlsx_file): 
\"\"\"Convert the given XLSX spreadsheet to", "inject_required(\"ClpSchool\", uniques) return injected def prepare_locations(locations): # There are multiple", "removed\" copies = [x.copy() for x in dicts] for d", "name self.titles = titles or [] self.rows = rows or", "None: data[titles[i]] = str(cell.value) return data def convert_xlsx(xlsx_file): \"\"\"Convert the", "+ str(i), \"nodes\")) os.makedirs(nodes_dir, exist_ok=True) path = os.path.join(nodes_dir, \"1.json\") with", "Sheet(sheet.title) def to_camel(s): \"\"\"Convert an underscored title into camel case.", "id1, \"fieldName\": field1}, {\"_typeName\": entity2, \"id\": id2, \"fieldName\": field2} ]", "\"CLP_ORGANISATION_ID\", \"ORGANISATION_NAME\": \"NAME\", \"ORG_ELECTORATE\": \"ELECTORATE\", \"S_ADDRESS1\": \"ADDRESS\", \"S_SUBURB\": \"SUBURB\", \"S_STATE\":", "x in dicts} return t.values() def now_as_iso8601(): return datetime.datetime.now().replace(microsecond=0).isoformat() +", "schools) injected = inject_required(\"ClpSchool\", uniques) return injected def prepare_locations(locations): #", "Build school -> organisation relations org_keys = {x[\"clpOrganisationId\"]: x[\"id\"] for", "\"P_POSTCODE\": \"POSTCODE\", \"TELEPHONE\": \"DAY_PHONE\", \"TEL_EVENING\": \"EVENING_PHONE\", \"EMAIL\": \"EMAIL\", \"MOBILE\": \"MOBILE\",", "\"LOC_S_ADDRESS1\": \"ADDRESS\", \"LOC_S_SUBURB\": \"SUBURB\", \"LOC_S_STATE\": \"STATE\", \"LOC_S_POSTCODE\": \"POSTCODE\"} TEACHER_TITLES =", "locations, each of which is identitical except that for being", "chunks(n, l): \"\"\"Yield n successive similar-sized chunks from l.\"\"\" chunk_size", "def process_sheet(sheet, titles, field_defns): if titles != sheet.titles: print(\"Sheet doesn't", "range(0, len(l), chunk_size): yield l[i:i + chunk_size] def prepare(raw_organisations, raw_schools,", "copy_without(students, \"schoolId\", \"schoolName\", \"location\"))) write_relations(generate_relations(organisations, schools, locations, teachers, students)) if", "titles != sheet.titles: print(\"Sheet doesn't have expected titles:\", [(i, x)", "for x in locations: # get an existing location with", "structs def unique(key, dicts): t = {x[key]: x for x", "t = {x[key]: x for x in dicts} return t.values()", "the given name, or add the new location location =", "for d in copies: for to_remove in keys_to_remove: d.pop(to_remove, None)", "for x in dicts] for d in copies: for to_remove", "\"P_ADDRESS1\", \"P_ADDRESS2\", \"P_SUBURB\", \"P_STATE\", \"P_POSTCODE\", \"TELEPHONE\", \"TEL_EVENING\", \"EMAIL\", \"MOBILE\", \"LEVEL_TAUGHT\",", "enumerate(titles) if x != sheet.titles[i]]) return [] structs = [[extract(defn,", "that graphcool expects SOURCE_XLSX = \"./data/CLP_combined.xlsx\" EXTRACT_OUTPUT_DIR = \"../server/extract\" SCHOOL_TITLES", "converted into a dictionary\"\"\" work_book = load_workbook(filename=xlsx_file, read_only=True, data_only=True) for", "f.write(json.dumps(nodes)) def chunks(n, l): \"\"\"Yield n successive similar-sized chunks from", "unique_orgs) return fat_orgs def prepare_schools(schools): uniques = unique(\"clpSchoolId\", schools) injected", "\"CLP_TEACHER_ID\", \"ORGANISATION_NAME\": \"ORGANISATION_NAME\", \"SCHOOL_NAME\": \"SCHOOL_NAME\", \"TITLE\": \"TITLE\", \"LNAME\": \"FAMILY_NAME\", \"FNAME\":", "\"LNAME\": \"FAMILY_NAME\", \"FNAME\": \"GIVEN_NAMES\", \"TEACHER_LANGUAGES\": \"LANGUAGES\", \"P_ADDRESS1\": \"ADDRESS1\", \"P_ADDRESS2\": \"ADDRESS2\",", "\"TEL_EVENING\": \"EVENING_PHONE\", \"EMAIL\": \"EMAIL\", \"MOBILE\": \"MOBILE\", \"LEVEL_TAUGHT\": \"LEVEL_TAUGHT\", 
\"LEVEL_OF_EDUCATION\": \"EDUCATION_LEVEL\",", "prepare_organisations(raw_organisations), prepare_schools(raw_schools), prepare_locations(raw_locations), prepare_teachers(raw_teachers), prepare_students(raw_students) ) def make_relation(entity1, id1, field1,", "date\" dt = datetime.datetime.strptime(s, \"%d/%b/%y\") return dt.isoformat() + \".0Z\" #", "\"TEL\", \"LOCATION_NAME_1\"] TEACHER_FIELDS = {\"TEACHER_ID\": \"CLP_TEACHER_ID\", \"ORGANISATION_NAME\": \"ORGANISATION_NAME\", \"SCHOOL_NAME\": \"SCHOOL_NAME\",", "STUDENT_FIELDS = {\"SCHOOL_NAME\": \"SCHOOL_NAME\", \"SCHOOL_ID\": \"SCHOOL_ID\", \"STUDENT_ID\": \"CLP_STUDENT_ID\", \"STUDENT_SRN\": \"SRN\",", "underscored title into camel case. 'PARENT_ORGANISATION_ID' => 'parentOrganisationId'\"\"\" bits =", "\"Inject the required fields that graphcool import required\" for x", "process_sheet(sheet, TEACHER_TITLES, [TEACHER_FIELDS]) elif sheet.name == \"Student\": (students, ) =", "keys removed\" copies = [x.copy() for x in dicts] for", "location.setdefault(\"schools\", list()) related_schools.append(x.pop(\"clpSchoolId\")) injected = inject_required(\"ClpLocation\", uniques.values()) # FIX THIS", "an excel spreadsheet\" def __init__(self, name, titles=None, rows=None): self.name =", "LOCATION_FIELDS = {\"LOCATION_NAME\": \"NAME\", \"LOC_ELECTORATE\": \"ELECTORATE\", \"SCHOOL_ID\": \"CLP_SCHOOL_ID\", \"LOC_S_ADDRESS1\": \"ADDRESS\",", "# get an existing location with the given name, or", "injected = inject_required(\"ClpLocation\", uniques.values()) # FIX THIS - Current extract", "install cuid import os.path import json import datetime from openpyxl", "schoolId in location.get(\"schools\", [])] # Build teacher -> school relations", "\"LOC_ELECTORATE\", \"LOC_S_ADDRESS1\", \"LOC_S_SUBURB\", \"LOC_S_STATE\", \"LOC_S_POSTCODE\"] ORGANISATION_FIELDS = {\"ORGANISATION_ID\": \"CLP_ORGANISATION_ID\", \"ORGANISATION_NAME\":", "data def process_sheet(sheet, titles, field_defns): if titles != sheet.titles: print(\"Sheet", "# Build location -> school relations school_keys = {x[\"clpSchoolId\"]: x[\"id\"]", "students): # Build school -> organisation relations org_keys = {x[\"clpOrganisationId\"]:", "to_camel(s): \"\"\"Convert an underscored title into camel case. 'PARENT_ORGANISATION_ID' =>", "\"CLP_SCHOOL_ID\", \"LOC_S_ADDRESS1\": \"ADDRESS\", \"LOC_S_SUBURB\": \"SUBURB\", \"LOC_S_STATE\": \"STATE\", \"LOC_S_POSTCODE\": \"POSTCODE\"} TEACHER_TITLES", "n successive similar-sized chunks from l.\"\"\" chunk_size = 1 +", "teacher.setdefault(\"schools\", list()) related_schools.append(x.pop(\"schoolId\")) injected = inject_required(\"ClpTeacher\", uniques.values()) return injected def" ]
[ "callsign def get_ustvgo_stream(self, chandict): driver = self.get_firefox_driver() blockPrint() driver.get(\"https://ustvgo.tv/\" +", "if os.path.isfile(self.m3ucache): self.fhdhr.logger.info(\"Loading Previously Saved Channel m3u.\") with open(self.m3ucache, 'r')", "html.fromstring(chanpage.content) channel_names_xpath = \"/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/text()\" channel_urls_xpath = \"/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/@href\" chan_names = tree.xpath(channel_names_xpath)", "channels try: driver.switch_to.frame(iframe) driver.find_element_by_xpath(\"//*[text()='This channel requires our VPN to watch!']\")", "= tree.xpath(channel_names_xpath) chan_urls = tree.xpath(channel_urls_xpath) return chan_names, chan_urls def format_callsign(self,", "def blockPrint(): sys.stdout = open(os.devnull, 'w') # Restore def enablePrint():", "self.scrape_channels() chan_number_index = 1 for name, url in zip(chan_names, chan_urls):", "tree.xpath(channel_names_xpath) chan_urls = tree.xpath(channel_urls_xpath) return chan_names, chan_urls def format_callsign(self, url):", "= str(playlist) driver.close() driver.quit() self.cached_m3u[chandict[\"callsign\"]] = streamurl self.save_m3u_cache() return streamurl", "self.m3ucache = pathlib.Path(self.cache_dir).joinpath('m3ucache.json') self.cached_m3u = {} self.load_m3u_cache() def load_m3u_cache(self): if", "m3u8_url): bestStream = None videoUrlM3u = m3u8.load(m3u8_url) if not videoUrlM3u.is_variant:", "caching def m3u8_beststream(self, m3u8_url): bestStream = None videoUrlM3u = m3u8.load(m3u8_url)", "def get_firefox_driver(self): ff_options = FirefoxOptions() ff_options.add_argument('--headless') firefox_profile = webdriver.FirefoxProfile() firefox_profile.set_preference('permissions.default.image',", "be grabbed.') return None # Autoplay iframe.click() try: playlist =", "None # Autoplay iframe.click() try: playlist = driver.wait_for_request('/playlist.m3u8', timeout=10) except", "= \"/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/@href\" chan_names = tree.xpath(channel_names_xpath) chan_urls = tree.xpath(channel_urls_xpath) return chan_names,", "our VPN to watch!']\") need_vpn = True except NoSuchElementException: need_vpn", "m3u.\") with open(self.m3ucache, 'r') as m3ufile: self.cached_m3u = json.load(m3ufile) def", "try: iframe = driver.find_element_by_css_selector(IFRAME_CSS_SELECTOR) except NoSuchElementException: self.fhdhr.logger.error('Video frame is not", "scrape_channels(self): channels_url = \"https://ustvgo.tv/\" chanpage = self.fhdhr.web.session.get(channels_url) tree = html.fromstring(chanpage.content)", "streamdict = {\"number\": chandict[\"number\"], \"stream_url\": streamurl} streamlist.append(streamdict) return streamlist, caching", "videoStream in videoUrlM3u.playlists: if not bestStream: bestStream = videoStream elif", "(url .split('/')[-2] .replace('-live', '') .replace('-channel', '') .replace('-free', '') .replace('-streaming', ''))", "if not bestStream: return bestStream.absolute_uri else: return m3u8_url def scrape_channels(self):", "firefox_profile.set_preference('browser.tabs.warnOnClose', False) firefox_profile.set_preference('media.volume_scale', '0.0') set_seleniumwire_options = { 'connection_timeout': None, 'verify_ssl':", "\"/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/@href\" chan_names = tree.xpath(channel_names_xpath) chan_urls = tree.xpath(channel_urls_xpath) 
return chan_names, chan_urls", "m3u8.load(m3u8_url) if not videoUrlM3u.is_variant: return m3u8_url for videoStream in videoUrlM3u.playlists:", "videoStream elif videoStream.stream_info.bandwidth > bestStream.stream_info.bandwidth: bestStream = videoStream if not", "zip(chan_names, chan_urls): chan_dict = { \"name\": name.rstrip(), \"number\": chan_number_index, \"callsign\":", "get_ustvgo_stream(self, chandict): driver = self.get_firefox_driver() blockPrint() driver.get(\"https://ustvgo.tv/\" + chandict[\"callsign\"]) enablePrint()", "= pathlib.Path(self.cache_dir).joinpath('m3ucache.json') self.cached_m3u = {} self.load_m3u_cache() def load_m3u_cache(self): if os.path.isfile(self.m3ucache):", "list(self.cached_m3u): streamurl = self.cached_m3u[chandict[\"callsign\"]] else: streamurl = self.get_ustvgo_stream(chandict) # if", "save_m3u_cache(self): self.fhdhr.logger.info(\"Saving Channel m3u cache.\") with open(self.m3ucache, 'w') as m3ufile:", "return None # Autoplay iframe.click() try: playlist = driver.wait_for_request('/playlist.m3u8', timeout=10)", "blockPrint(): sys.stdout = open(os.devnull, 'w') # Restore def enablePrint(): sys.stdout", "chandict[\"callsign\"] in list(self.cached_m3u): streamurl = self.cached_m3u[chandict[\"callsign\"]] else: streamurl = self.get_ustvgo_stream(chandict)", "streamurl = self.get_ustvgo_stream(chandict) # if self.fhdhr.config.dict[\"origin\"][\"force_best\"]: streamurl = self.m3u8_beststream(streamurl) streamdict", "need_vpn = True except NoSuchElementException: need_vpn = False finally: driver.switch_to.default_content()", "chandict, allchandict): caching = True streamlist = [] streamdict =", "'w') as m3ufile: m3ufile.write(json.dumps(self.cached_m3u, indent=4)) def get_channels(self): channel_list = []", "True except NoSuchElementException: need_vpn = False finally: driver.switch_to.default_content() if need_vpn:", "if not videoUrlM3u.is_variant: return m3u8_url for videoStream in videoUrlM3u.playlists: if", "str(playlist) driver.close() driver.quit() self.cached_m3u[chandict[\"callsign\"]] = streamurl self.save_m3u_cache() return streamurl def", "streamlist.append(streamdict) return streamlist, caching def m3u8_beststream(self, m3u8_url): bestStream = None", "tree = html.fromstring(chanpage.content) channel_names_xpath = \"/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/text()\" channel_urls_xpath = \"/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/@href\" chan_names", "for channel') return None # Detect VPN-required channels try: driver.switch_to.frame(iframe)", "return None # Detect VPN-required channels try: driver.switch_to.frame(iframe) driver.find_element_by_xpath(\"//*[text()='This channel", "= { 'connection_timeout': None, 'verify_ssl': False, 'suppress_connection_errors': True } driver", "# Disable def blockPrint(): sys.stdout = open(os.devnull, 'w') # Restore", "import webdriver from selenium.common.exceptions import TimeoutException, NoSuchElementException from selenium.webdriver.firefox.options import", "origin self.cache_dir = self.fhdhr.config.dict[\"filedir\"][\"epg_cache\"][\"origin\"][\"top\"] self.m3ucache = pathlib.Path(self.cache_dir).joinpath('m3ucache.json') self.cached_m3u = {}", "return streamurl def get_firefox_driver(self): ff_options = FirefoxOptions() ff_options.add_argument('--headless') firefox_profile =", "Detect VPN-required channels try: driver.switch_to.frame(iframe) driver.find_element_by_xpath(\"//*[text()='This channel requires our VPN", "return 
chan_names, chan_urls def format_callsign(self, url): callsign = (url .split('/')[-2]", "False) firefox_profile.set_preference('media.volume_scale', '0.0') set_seleniumwire_options = { 'connection_timeout': None, 'verify_ssl': False,", ".split('/')[-2] .replace('-live', '') .replace('-channel', '') .replace('-free', '') .replace('-streaming', '')) return", "Previously Saved Channel m3u.\") with open(self.m3ucache, 'r') as m3ufile: self.cached_m3u", "'r') as m3ufile: self.cached_m3u = json.load(m3ufile) def save_m3u_cache(self): self.fhdhr.logger.info(\"Saving Channel", "as m3ufile: self.cached_m3u = json.load(m3ufile) def save_m3u_cache(self): self.fhdhr.logger.info(\"Saving Channel m3u", "= tree.xpath(channel_urls_xpath) return chan_names, chan_urls def format_callsign(self, url): callsign =", "True streamlist = [] streamdict = {} if chandict[\"callsign\"] in", "1 return channel_list def get_channel_stream(self, chandict, allchandict): caching = True", "videoUrlM3u.playlists: if not bestStream: bestStream = videoStream elif videoStream.stream_info.bandwidth >", "VPN to watch!']\") need_vpn = True except NoSuchElementException: need_vpn =", "= self.get_firefox_driver() blockPrint() driver.get(\"https://ustvgo.tv/\" + chandict[\"callsign\"]) enablePrint() # Get iframe", "'') .replace('-streaming', '')) return callsign def get_ustvgo_stream(self, chandict): driver =", "NoSuchElementException from selenium.webdriver.firefox.options import Options as FirefoxOptions IFRAME_CSS_SELECTOR = '.iframe-container>iframe'", "VPN-required channels try: driver.switch_to.frame(iframe) driver.find_element_by_xpath(\"//*[text()='This channel requires our VPN to", "None, 'verify_ssl': False, 'suppress_connection_errors': True } driver = webdriver.Firefox(seleniumwire_options=set_seleniumwire_options, options=ff_options,", "'verify_ssl': False, 'suppress_connection_errors': True } driver = webdriver.Firefox(seleniumwire_options=set_seleniumwire_options, options=ff_options, firefox_profile=firefox_profile)", "Autoplay iframe.click() try: playlist = driver.wait_for_request('/playlist.m3u8', timeout=10) except TimeoutException: self.fhdhr.logger.error('Channel", "driver.wait_for_request('/playlist.m3u8', timeout=10) except TimeoutException: self.fhdhr.logger.error('Channel m3u8 not found.') return None", "open(os.devnull, 'w') # Restore def enablePrint(): sys.stdout = sys.__stdout__ class", "as m3ufile: m3ufile.write(json.dumps(self.cached_m3u, indent=4)) def get_channels(self): channel_list = [] chan_names,", "firefox_profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false') firefox_profile.set_preference('dom.disable_beforeunload', True) firefox_profile.set_preference('browser.tabs.warnOnClose', False) firefox_profile.set_preference('media.volume_scale', '0.0') set_seleniumwire_options =", "webdriver.FirefoxProfile() firefox_profile.set_preference('permissions.default.image', 2) firefox_profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false') firefox_profile.set_preference('dom.disable_beforeunload', True) firefox_profile.set_preference('browser.tabs.warnOnClose', False) firefox_profile.set_preference('media.volume_scale',", "\"callsign\": self.format_callsign(url), } channel_list.append(chan_dict) chan_number_index += 1 return channel_list def", "self.cached_m3u[chandict[\"callsign\"]] else: streamurl = self.get_ustvgo_stream(chandict) # if self.fhdhr.config.dict[\"origin\"][\"force_best\"]: streamurl =", "{\"number\": chandict[\"number\"], \"stream_url\": streamurl} 
streamlist.append(streamdict) return streamlist, caching def m3u8_beststream(self,", "def get_channels(self): channel_list = [] chan_names, chan_urls = self.scrape_channels() chan_number_index", "return callsign def get_ustvgo_stream(self, chandict): driver = self.get_firefox_driver() blockPrint() driver.get(\"https://ustvgo.tv/\"", "= \"/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/text()\" channel_urls_xpath = \"/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/@href\" chan_names = tree.xpath(channel_names_xpath) chan_urls =", "html import pathlib import json import m3u8 from seleniumwire import", "= [] streamdict = {} if chandict[\"callsign\"] in list(self.cached_m3u): streamurl", "m3u8_beststream(self, m3u8_url): bestStream = None videoUrlM3u = m3u8.load(m3u8_url) if not", "format_callsign(self, url): callsign = (url .split('/')[-2] .replace('-live', '') .replace('-channel', '')", "bestStream = videoStream if not bestStream: return bestStream.absolute_uri else: return", "return bestStream.absolute_uri else: return m3u8_url def scrape_channels(self): channels_url = \"https://ustvgo.tv/\"", "chan_number_index = 1 for name, url in zip(chan_names, chan_urls): chan_dict", "allchandict): caching = True streamlist = [] streamdict = {}", "from selenium.common.exceptions import TimeoutException, NoSuchElementException from selenium.webdriver.firefox.options import Options as", "needs VPN to be grabbed.') return None # Autoplay iframe.click()", "return streamlist, caching def m3u8_beststream(self, m3u8_url): bestStream = None videoUrlM3u", "if self.fhdhr.config.dict[\"origin\"][\"force_best\"]: streamurl = self.m3u8_beststream(streamurl) streamdict = {\"number\": chandict[\"number\"], \"stream_url\":", "def scrape_channels(self): channels_url = \"https://ustvgo.tv/\" chanpage = self.fhdhr.web.session.get(channels_url) tree =", "os.path.isfile(self.m3ucache): self.fhdhr.logger.info(\"Loading Previously Saved Channel m3u.\") with open(self.m3ucache, 'r') as", "m3ufile: m3ufile.write(json.dumps(self.cached_m3u, indent=4)) def get_channels(self): channel_list = [] chan_names, chan_urls", "OriginChannels(): def __init__(self, fhdhr, origin): self.fhdhr = fhdhr self.origin =", "self.fhdhr.logger.info(\"Loading Previously Saved Channel m3u.\") with open(self.m3ucache, 'r') as m3ufile:", "chandict[\"callsign\"]) enablePrint() # Get iframe iframe = None try: iframe", "def __init__(self, fhdhr, origin): self.fhdhr = fhdhr self.origin = origin", "= None videoUrlM3u = m3u8.load(m3u8_url) if not videoUrlM3u.is_variant: return m3u8_url", "= self.fhdhr.config.dict[\"filedir\"][\"epg_cache\"][\"origin\"][\"top\"] self.m3ucache = pathlib.Path(self.cache_dir).joinpath('m3ucache.json') self.cached_m3u = {} self.load_m3u_cache() def", "__init__(self, fhdhr, origin): self.fhdhr = fhdhr self.origin = origin self.cache_dir", "NoSuchElementException: need_vpn = False finally: driver.switch_to.default_content() if need_vpn: self.fhdhr.logger.warning('Channel needs", "not bestStream: bestStream = videoStream elif videoStream.stream_info.bandwidth > bestStream.stream_info.bandwidth: bestStream", "= 1 for name, url in zip(chan_names, chan_urls): chan_dict =", "= self.scrape_channels() chan_number_index = 1 for name, url in zip(chan_names,", "if chandict[\"callsign\"] in list(self.cached_m3u): streamurl = self.cached_m3u[chandict[\"callsign\"]] else: streamurl =", "chan_urls): chan_dict = { \"name\": name.rstrip(), \"number\": chan_number_index, \"callsign\": 
self.format_callsign(url),", "= m3u8.load(m3u8_url) if not videoUrlM3u.is_variant: return m3u8_url for videoStream in", "driver.quit() self.cached_m3u[chandict[\"callsign\"]] = streamurl self.save_m3u_cache() return streamurl def get_firefox_driver(self): ff_options", "bestStream = None videoUrlM3u = m3u8.load(m3u8_url) if not videoUrlM3u.is_variant: return", "'suppress_connection_errors': True } driver = webdriver.Firefox(seleniumwire_options=set_seleniumwire_options, options=ff_options, firefox_profile=firefox_profile) return driver", "iframe.click() try: playlist = driver.wait_for_request('/playlist.m3u8', timeout=10) except TimeoutException: self.fhdhr.logger.error('Channel m3u8", "pathlib import json import m3u8 from seleniumwire import webdriver from", "chan_names = tree.xpath(channel_names_xpath) chan_urls = tree.xpath(channel_urls_xpath) return chan_names, chan_urls def", "return m3u8_url def scrape_channels(self): channels_url = \"https://ustvgo.tv/\" chanpage = self.fhdhr.web.session.get(channels_url)", "sys.stdout = open(os.devnull, 'w') # Restore def enablePrint(): sys.stdout =", "firefox_profile.set_preference('media.volume_scale', '0.0') set_seleniumwire_options = { 'connection_timeout': None, 'verify_ssl': False, 'suppress_connection_errors':", "sys.__stdout__ class OriginChannels(): def __init__(self, fhdhr, origin): self.fhdhr = fhdhr", "open(self.m3ucache, 'w') as m3ufile: m3ufile.write(json.dumps(self.cached_m3u, indent=4)) def get_channels(self): channel_list =", "open(self.m3ucache, 'r') as m3ufile: self.cached_m3u = json.load(m3ufile) def save_m3u_cache(self): self.fhdhr.logger.info(\"Saving", "url): callsign = (url .split('/')[-2] .replace('-live', '') .replace('-channel', '') .replace('-free',", "IFRAME_CSS_SELECTOR = '.iframe-container>iframe' # Disable def blockPrint(): sys.stdout = open(os.devnull,", "False, 'suppress_connection_errors': True } driver = webdriver.Firefox(seleniumwire_options=set_seleniumwire_options, options=ff_options, firefox_profile=firefox_profile) return", "VPN to be grabbed.') return None # Autoplay iframe.click() try:", "m3u8_url def scrape_channels(self): channels_url = \"https://ustvgo.tv/\" chanpage = self.fhdhr.web.session.get(channels_url) tree", "selenium.common.exceptions import TimeoutException, NoSuchElementException from selenium.webdriver.firefox.options import Options as FirefoxOptions", "os import sys from lxml import html import pathlib import", "streamurl = str(playlist) driver.close() driver.quit() self.cached_m3u[chandict[\"callsign\"]] = streamurl self.save_m3u_cache() return", "videoStream if not bestStream: return bestStream.absolute_uri else: return m3u8_url def", "iframe iframe = None try: iframe = driver.find_element_by_css_selector(IFRAME_CSS_SELECTOR) except NoSuchElementException:", "cache.\") with open(self.m3ucache, 'w') as m3ufile: m3ufile.write(json.dumps(self.cached_m3u, indent=4)) def get_channels(self):", "{} if chandict[\"callsign\"] in list(self.cached_m3u): streamurl = self.cached_m3u[chandict[\"callsign\"]] else: streamurl", "if not bestStream: bestStream = videoStream elif videoStream.stream_info.bandwidth > bestStream.stream_info.bandwidth:", "chan_dict = { \"name\": name.rstrip(), \"number\": chan_number_index, \"callsign\": self.format_callsign(url), }", "= videoStream elif videoStream.stream_info.bandwidth > bestStream.stream_info.bandwidth: bestStream = videoStream if", "driver.find_element_by_css_selector(IFRAME_CSS_SELECTOR) except NoSuchElementException: self.fhdhr.logger.error('Video frame is 
import os
import sys
from lxml import html
import pathlib
import json
import m3u8
from seleniumwire import webdriver
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.firefox.options import Options as FirefoxOptions

IFRAME_CSS_SELECTOR = '.iframe-container>iframe'


# Disable stdout
def blockPrint():
    sys.stdout = open(os.devnull, 'w')


# Restore stdout
def enablePrint():
    sys.stdout = sys.__stdout__


class OriginChannels():

    def __init__(self, fhdhr, origin):
        self.fhdhr = fhdhr
        self.origin = origin

        self.cache_dir = self.fhdhr.config.dict["filedir"]["epg_cache"]["origin"]["top"]
        self.m3ucache = pathlib.Path(self.cache_dir).joinpath('m3ucache.json')

        self.cached_m3u = {}
        self.load_m3u_cache()

    def load_m3u_cache(self):
        if os.path.isfile(self.m3ucache):
            self.fhdhr.logger.info("Loading Previously Saved Channel m3u.")
            with open(self.m3ucache, 'r') as m3ufile:
                self.cached_m3u = json.load(m3ufile)

    def save_m3u_cache(self):
        self.fhdhr.logger.info("Saving Channel m3u cache.")
        with open(self.m3ucache, 'w') as m3ufile:
            m3ufile.write(json.dumps(self.cached_m3u, indent=4))

    def get_channels(self):
        channel_list = []
        chan_names, chan_urls = self.scrape_channels()
        chan_number_index = 1
        for name, url in zip(chan_names, chan_urls):
            chan_dict = {
                "name": name.rstrip(),
                "number": chan_number_index,
                "callsign": self.format_callsign(url),
            }
            channel_list.append(chan_dict)
            chan_number_index += 1
        return channel_list

    def get_channel_stream(self, chandict, allchandict):
        caching = True
        streamlist = []
        streamdict = {}
        if chandict["callsign"] in list(self.cached_m3u):
            streamurl = self.cached_m3u[chandict["callsign"]]
        else:
            streamurl = self.get_ustvgo_stream(chandict)
        # if self.fhdhr.config.dict["origin"]["force_best"]:
        streamurl = self.m3u8_beststream(streamurl)
        streamdict = {"number": chandict["number"], "stream_url": streamurl}
        streamlist.append(streamdict)
        return streamlist, caching

    def m3u8_beststream(self, m3u8_url):
        bestStream = None
        videoUrlM3u = m3u8.load(m3u8_url)
        if not videoUrlM3u.is_variant:
            return m3u8_url

        for videoStream in videoUrlM3u.playlists:
            if not bestStream:
                bestStream = videoStream
            elif videoStream.stream_info.bandwidth > bestStream.stream_info.bandwidth:
                bestStream = videoStream

        if bestStream:  # return the highest-bandwidth variant when one was found
            return bestStream.absolute_uri
        return m3u8_url

    def scrape_channels(self):
        channels_url = "https://ustvgo.tv/"
        chanpage = self.fhdhr.web.session.get(channels_url)
        tree = html.fromstring(chanpage.content)
        channel_names_xpath = "/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/text()"
        channel_urls_xpath = "/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/@href"
        chan_names = tree.xpath(channel_names_xpath)
        chan_urls = tree.xpath(channel_urls_xpath)
        return chan_names, chan_urls

    def format_callsign(self, url):
        callsign = (url
                    .split('/')[-2]
                    .replace('-live', '')
                    .replace('-channel', '')
                    .replace('-free', '')
                    .replace('-streaming', ''))
        return callsign

    def get_ustvgo_stream(self, chandict):
        driver = self.get_firefox_driver()

        blockPrint()
        driver.get("https://ustvgo.tv/" + chandict["callsign"])
        enablePrint()

        # Get iframe
        iframe = None
        try:
            iframe = driver.find_element_by_css_selector(IFRAME_CSS_SELECTOR)
        except NoSuchElementException:
            self.fhdhr.logger.error('Video frame not found for channel')
            return None

        # Detect VPN-required channels
        try:
            driver.switch_to.frame(iframe)
            driver.find_element_by_xpath("//*[text()='This channel requires our VPN to watch!']")
            need_vpn = True
        except NoSuchElementException:
            need_vpn = False
        finally:
            driver.switch_to.default_content()

        if need_vpn:
            self.fhdhr.logger.warning('Channel needs VPN to be grabbed.')
            return None

        # Autoplay
        iframe.click()

        try:
            playlist = driver.wait_for_request('/playlist.m3u8', timeout=10)
        except TimeoutException:
            self.fhdhr.logger.error('Channel m3u8 not found.')
            return None

        streamurl = str(playlist)

        driver.close()
        driver.quit()

        self.cached_m3u[chandict["callsign"]] = streamurl
        self.save_m3u_cache()

        return streamurl

    def get_firefox_driver(self):
        ff_options = FirefoxOptions()
        ff_options.add_argument('--headless')

        firefox_profile = webdriver.FirefoxProfile()
        firefox_profile.set_preference('permissions.default.image', 2)
        firefox_profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
        firefox_profile.set_preference('dom.disable_beforeunload', True)
        firefox_profile.set_preference('browser.tabs.warnOnClose', False)
        firefox_profile.set_preference('media.volume_scale', '0.0')

        set_seleniumwire_options = {
            'connection_timeout': None,
            'verify_ssl': False,
            'suppress_connection_errors': True
        }
        # The tail of this call was truncated in the source; passing the
        # profile and options built above is the natural completion.
        driver = webdriver.Firefox(seleniumwire_options=set_seleniumwire_options,
                                   options=ff_options,
                                   firefox_profile=firefox_profile)
        return driver
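# Example (sketch): the best-variant pick done by m3u8_beststream, exercised
# in isolation. Everything here is hypothetical: the manifest text, URIs and
# bandwidths are invented for illustration; only the m3u8 package calls are real.
import m3u8

MASTER = """#EXTM3U
#EXT-X-STREAM-INF:BANDWIDTH=800000,RESOLUTION=640x360
low/index.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=2500000,RESOLUTION=1280x720
high/index.m3u8
"""

variant = m3u8.loads(MASTER)
assert variant.is_variant  # a master playlist listing sub-playlists
best = max(variant.playlists, key=lambda p: p.stream_info.bandwidth)
print(best.uri)  # -> high/index.m3u8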
import pygame
import shared


class Explosion():
    def __init__(self, images: dict, centre: tuple, key: str) -> None:
        '''Class variables.
           key: 'sm', 'lg', 'player'
        '''
        self.images = images                    # dict of frame lists (8 images per key)
        self.centre = centre                    # use for all frames
        self.key = key                          # key used later
        self.image = images[key][0]             # set to first image in the sequence
        self.rect = self.image.get_rect()       # define rectangle from image size
        self.rect.center = self.centre          # set centre for all frames
        self.frame = 0                          # no of first frame
        self.time_passed = 0                    # set timer to 0
        self.frame_rate = 0.1                   # 8 images at 1 frame per 0.1 secs = 0.8 seconds
        self.active = True

    def update(self, dt):
        self.time_passed += dt
        if self.time_passed >= self.frame_rate:             # 0.1 seconds has passed
            self.time_passed = 0                            # reset timer
            self.frame += 1                                 # increase frame number
            if self.frame >= len(self.images[self.key]):    # check if end of list
                self.active = False                         # animation finished
            else:
                self.image = self.images[self.key][self.frame]  # next frame
                self.rect = self.image.get_rect()               # new rectangle
                self.rect.center = self.centre                  # set centre to parameter value
        return self.active

    def draw(self):
        shared.screen.blit(self.image, self.rect)  # draw current frame
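# Example (sketch): driving Explosion from a pygame loop. Assumptions: the
# game's `shared` module is importable and only needs a `.screen` Surface,
# and the grey placeholder squares below stand in for real artwork.
import pygame
import shared

pygame.init()
shared.screen = pygame.display.set_mode((320, 240))
clock = pygame.time.Clock()

def _frame(shade):
    s = pygame.Surface((16, 16))
    s.fill((shade, shade, shade))
    return s

frames = {'sm': [_frame(32 * i) for i in range(8)]}  # placeholder frames
boom = Explosion(frames, centre=(160, 120), key='sm')

running = True
while running:
    dt = clock.tick(60) / 1000.0   # frame time in seconds
    pygame.event.pump()
    shared.screen.fill((0, 0, 0))
    running = boom.update(dt)      # False once all 8 frames have played
    boom.draw()
    pygame.display.flip()
pygame.quit()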
import sys
import threading
import logging
import time

logger = logging.getLogger("interchange.strategy.base")


class BaseStrategy(object):
    """Implements threshold-interval based flow control.

    The overall goal is to trap the flow of apps from the workflow, measure
    it and redirect it to the appropriate executors for processing.

    This is based on the following logic:

    .. code-block:: none

        BEGIN (INTERVAL, THRESHOLD, callback) :
            start = current_time()
            while (current_time()-start < INTERVAL) :
                 count = get_events_since(start)
                 if count >= THRESHOLD :
                     break
            callback()

    This logic ensures that the callbacks are activated with a maximum delay
    of `interval` for systems with infrequent events as well as systems which
    would generate large bursts of events.

    Once a callback is triggered, the callback generally runs a strategy
    method on the sites available as well as the queues.

    TODO: When the debug logs are enabled this module emits duplicate
    messages. This issue needs more debugging. What I've learnt so far is
    that the duplicate messages are present only when the timer thread is
    started, so this could be from a duplicate logger being added by the
    thread.
    """

    def __init__(self, *args, threshold=20, interval=5):
        """Initialize the flowcontrol object.

        We start the timer thread here

        Parameters
        ----------
        - threshold (int) : Tasks after which the callback is triggered
        - interval (int) : seconds after which timer expires
        """
        self.interchange = None
        self.threshold = threshold
        self.interval = interval

        self.cb_args = args
        self.callback = self.strategize
        self._handle = None
        self._event_count = 0
        self._event_buffer = []
        self._wake_up_time = time.time() + 1
        self._kill_event = threading.Event()
        self._thread = threading.Thread(target=self._wake_up_timer,
                                        args=(self._kill_event,))
        self._thread.daemon = True

    def start(self, interchange):
        """Actually start the strategy

        Parameters
        ----------
        interchange: funcx.executors.high_throughput.interchange.Interchange
            Interchange to bind the strategy to
        """
        self.interchange = interchange
        if hasattr(interchange.config, 'provider'):
            logger.debug("Strategy bounds-> init:{}, min:{}, max:{}".format(
                interchange.config.provider.init_blocks,
                interchange.config.provider.min_blocks,
                interchange.config.provider.max_blocks))
        self._thread.start()

    def strategize(self, *args, **kwargs):
        """Strategize is called every time the threshold or the interval is hit
        """
        logger.debug("Strategize called with {} {}".format(args, kwargs))

    def _wake_up_timer(self, kill_event):
        """Internal. This is the function that the thread will execute.
        waits on an event so that the thread can make a quick exit when
        close() is called

        Args:
            - kill_event (threading.Event) : Event to wait on
        """
        while True:
            prev = self._wake_up_time

            # Waiting for the event returns True only when the event
            # is set, usually by the parent thread
            time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))

            if time_to_die:
                return

            if prev == self._wake_up_time:
                self.make_callback(kind='timer')
            else:
                print("Sleeping a bit more")

    def notify(self, event_id):
        """Let the FlowControl system know that there is an event.

        This method is to be called from the Interchange to notify the
        flowcontrol
        """
        self._event_buffer.extend([event_id])
        self._event_count += 1
        if self._event_count >= self.threshold:
            logger.debug("Eventcount >= threshold")
            self.make_callback(kind="event")

    def make_callback(self, kind=None):
        """Makes the callback and resets the timer.

        KWargs:
            - kind (str): Default=None, used to pass information on what
              triggered the callback
        """
        self._wake_up_time = time.time() + self.interval
        self.callback(tasks=self._event_buffer, kind=kind)
        self._event_buffer = []

    def close(self):
        """Merge the threads and terminate."""
        self._kill_event.set()
        self._thread.join()


class Timer(object):
    """This timer is a simplified version of the FlowControl timer.
    This timer does not employ notify events.

    This is based on the following logic:

    .. code-block:: none

        BEGIN (INTERVAL, THRESHOLD, callback) :
            start = current_time()
            while (current_time()-start < INTERVAL) :
                 wait()
                 break
            callback()
    """

    def __init__(self, callback, *args, interval=5):
        """Initialize the flowcontrol object
        We start the timer thread here

        Args:
            - callback (callable) : the function to invoke on each expiry

        KWargs:
            - interval (int) : seconds after which timer expires
        """
        self.interval = interval
        self.cb_args = args
        self.callback = callback
        self._wake_up_time = time.time() + 1

        self._kill_event = threading.Event()
        self._thread = threading.Thread(target=self._wake_up_timer,
                                        args=(self._kill_event,))
        self._thread.daemon = True
        self._thread.start()

    def _wake_up_timer(self, kill_event):
        """Internal. This is the function that the thread will execute.
        waits on an event so that the thread can make a quick exit when
        close() is called

        Args:
            - kill_event (threading.Event) : Event to wait on
        """
        # Sleep till time to wake up
        while True:
            prev = self._wake_up_time

            # Waiting for the event returns True only when the event
            # is set, usually by the parent thread
            time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))

            if time_to_die:
                return

            if prev == self._wake_up_time:
                self.make_callback(kind='timer')
            else:
                print("Sleeping a bit more")

    def make_callback(self, kind=None):
        """Makes the callback and resets the timer."""
        self._wake_up_time = time.time() + self.interval
        self.callback(*self.cb_args)

    def close(self):
        """Merge the threads and terminate."""
        self._kill_event.set()
        self._thread.join()
code-block:: none BEGIN", "method on the sites available as well asqeuque TODO: When", "We start the timer thread here Parameters ---------- - threshold", "def close(self): \"\"\"Merge the threads and terminate.\"\"\" self._kill_event.set() self._thread.join() class", "well asqeuque TODO: When the debug logs are enabled this", "self._wake_up_time # Waiting for the event returns True only when", "a quick exit when close() is called Args: - kill_event", "method is to be called from the Interchange to notify", "self.make_callback(kind=\"event\") def make_callback(self, kind=None): \"\"\"Makes the callback and resets the", "self.interval = interval self.cb_args = args self.callback = self.strategize self._handle", "kind=None): \"\"\"Makes the callback and resets the timer. KWargs: -", "def notify(self, event_id): \"\"\"Let the FlowControl system know that there", "executors for processing. This is based on the following logic:", "time.time() + 1 self._kill_event = threading.Event() self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))", "emits duplicate messages. This issue needs more debugging. What I've", ": .. code-block:: none BEGIN (INTERVAL, THRESHOLD, callback) : start", "< INTERVAL) : wait() break callback() \"\"\" def __init__(self, callback,", "if hasattr(interchange.config, 'provider'): logger.debug(\"Strategy bounds-> init:{}, min:{}, max:{}\".format( interchange.config.provider.init_blocks, interchange.config.provider.min_blocks,", "called everytime the threshold or the interval is hit \"\"\"", "return if prev == self._wake_up_time: self.make_callback(kind='timer') else: print(\"Sleeping a bit", "strategy method on the sites available as well asqeuque TODO:", "def __init__(self, *args, threshold=20, interval=5): \"\"\"Initialize the flowcontrol object. We", "time to wake up while True: prev = self._wake_up_time #", "= self._wake_up_time # Waiting for the event returns True only", "maximum delay of `interval` for systems with infrequent events as", "- kind (str): Default=None, used to pass information on what", "threshold self.interval = interval self.cb_args = args self.callback = self.strategize", "\"\"\" def __init__(self, *args, threshold=20, interval=5): \"\"\"Initialize the flowcontrol object.", "**kwargs): \"\"\" Strategize is called everytime the threshold or the", "self._event_count >= self.threshold: logger.debug(\"Eventcount >= threshold\") self.make_callback(kind=\"event\") def make_callback(self, kind=None):", "present only when the timer thread is started, so this", "FlowControl system know that there is an event. This method", "= time.time() + self.interval self.callback(*self.cb_args) def close(self): \"\"\"Merge the threads", "as well asqeuque TODO: When the debug logs are enabled", "threshold or the interval is hit \"\"\" logger.debug(\"Strategize called with", "hit \"\"\" logger.debug(\"Strategize called with {} {}\".format(args, kwargs)) def _wake_up_timer(self,", "thread time_to_die = kill_event.wait(float(max(prev - time.time(), 0))) if time_to_die: return", "import sys import threading import logging import time logger =", "duplicate messages. This issue needs more debugging. What I've learnt", "(INTERVAL, THRESHOLD, callback) : start = current_time() while (current_time()-start <", "\"\"\"This timer is a simplified version of the FlowControl timer.", "more debugging. 
What I've learnt so far is that the", "is called Args: - kill_event (threading.Event) : Event to wait", "triggered, the callback generally runs a strategy method on the", "self._thread.start() def _wake_up_timer(self, kill_event): \"\"\"Internal. This is the function that", "What I've learnt so far is that the duplicate messages", "exit when close() is called Args: - kill_event (threading.Event) :", "callback, *args, interval=5): \"\"\"Initialize the flowcontrol object We start the", "time.time() + self.interval self.callback(*self.cb_args) def close(self): \"\"\"Merge the threads and", "processing. This is based on the following logic: .. code-block::", "= None self._event_count = 0 self._event_buffer = [] self._wake_up_time =", "to wait on \"\"\" # Sleep till time to wake", "the callbacks are activated with a maximum delay of `interval`", "make a quick exit when close() is called Args: -", "logs are enabled this module emits duplicate messages. This issue", "execute. waits on an event so that the thread can", "(int) : Tasks after which the callback is triggered -", "BEGIN (INTERVAL, THRESHOLD, callback) : start = current_time() while (current_time()-start", "= kill_event.wait(float(max(prev - time.time(), 0))) if time_to_die: return if prev", "resets the timer. KWargs: - kind (str): Default=None, used to", "callback() This logic ensures that the callbacks are activated with", "event # is set, usually by the parent thread time_to_die", "self._wake_up_time = time.time() + self.interval self.callback(*self.cb_args) def close(self): \"\"\"Merge the", "self.interchange = None self.threshold = threshold self.interval = interval self.cb_args", "args self.callback = callback self._wake_up_time = time.time() + 1 self._kill_event", "__init__(self, *args, threshold=20, interval=5): \"\"\"Initialize the flowcontrol object. We start", "if time_to_die: return if prev == self._wake_up_time: self.make_callback(kind='timer') else: print(\"Sleeping", "thread here Args: - dfk (DataFlowKernel) : DFK object to", "Strategize is called everytime the threshold or the interval is", "is to be called from the Interchange to notify the", "more\") def notify(self, event_id): \"\"\"Let the FlowControl system know that", "make_callback(self, kind=None): \"\"\"Makes the callback and resets the timer. \"\"\"", "quick exit when close() is called Args: - kill_event (threading.Event)", "parent thread time_to_die = kill_event.wait(float(max(prev - time.time(), 0))) if time_to_die:", "start(self, interchange): \"\"\"Actually start the strategy Parameters ---------- interchange: funcx.executors.high_throughput.interchange.Interchange", "logger being added by the thread. \"\"\" def __init__(self, *args,", "from the workflow, measure it and redirect it the appropriate", "\"\"\" self.interchange = None self.threshold = threshold self.interval = interval", "def make_callback(self, kind=None): \"\"\"Makes the callback and resets the timer.", "[] def close(self): \"\"\"Merge the threads and terminate.\"\"\" self._kill_event.set() self._thread.join()", "kind=None): \"\"\"Makes the callback and resets the timer. \"\"\" self._wake_up_time", "timer. KWargs: - kind (str): Default=None, used to pass information", "and resets the timer. KWargs: - kind (str): Default=None, used", "(DataFlowKernel) : DFK object to track parsl progress KWargs: -", "Once a callback is triggered, the callback generally runs a", "= args self.callback = self.strategize self._handle = None self._event_count =", "thread will execute. 
waits on an event so that the", "which the callback is triggered - interval (int) : seconds", "started, so this could be from a duplicate logger being", "interval (int) : seconds after which timer expires \"\"\" self.interval", "print(\"Sleeping a bit more\") def make_callback(self, kind=None): \"\"\"Makes the callback", "Parameters ---------- - threshold (int) : Tasks after which the", "by the parent thread time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))", "bit more\") def notify(self, event_id): \"\"\"Let the FlowControl system know", "callback \"\"\" self._wake_up_time = time.time() + self.interval self.callback(tasks=self._event_buffer, kind=kind) self._event_buffer", "< INTERVAL) : count = get_events_since(start) if count >= THRESHOLD", "self.callback = self.strategize self._handle = None self._event_count = 0 self._event_buffer", "count >= THRESHOLD : break callback() This logic ensures that", "self._event_count += 1 if self._event_count >= self.threshold: logger.debug(\"Eventcount >= threshold\")", "self._kill_event = threading.Event() self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,)) self._thread.daemon = True", "def strategize(self, *args, **kwargs): \"\"\" Strategize is called everytime the", "- kill_event (threading.Event) : Event to wait on \"\"\" while", "when the timer thread is started, so this could be", "is the function that the thread will execute. waits on", ": Event to wait on \"\"\" # Sleep till time", ": break callback() This logic ensures that the callbacks are", "(threading.Event) : Event to wait on \"\"\" while True: prev", "\"\"\" self._wake_up_time = time.time() + self.interval self.callback(tasks=self._event_buffer, kind=kind) self._event_buffer =", "Args: - kill_event (threading.Event) : Event to wait on \"\"\"", "the flowcontrol object. We start the timer thread here Parameters", "version of the FlowControl timer. This timer does not employ", "current_time() while (current_time()-start < INTERVAL) : wait() break callback() \"\"\"", "with a maximum delay of `interval` for systems with infrequent", "the callback generally runs a strategy method on the sites", "asqeuque TODO: When the debug logs are enabled this module", "set, usually by the parent thread time_to_die = kill_event.wait(float(max(prev -", "timer thread here Parameters ---------- - threshold (int) : Tasks", "the callback and resets the timer. KWargs: - kind (str):", "trap the flow of apps from the workflow, measure it", "know that there is an event. This method is to", "needs more debugging. What I've learnt so far is that", "the event returns True only when the event # is", "(int) : seconds after which timer expires \"\"\" self.interval =", "INTERVAL) : wait() break callback() \"\"\" def __init__(self, callback, *args,", "None self.threshold = threshold self.interval = interval self.cb_args = args", "= self.strategize self._handle = None self._event_count = 0 self._event_buffer =", "expires \"\"\" self.interval = interval self.cb_args = args self.callback =", ": wait() break callback() \"\"\" def __init__(self, callback, *args, interval=5):", "to track parsl progress KWargs: - threshold (int) : Tasks", "= True self._thread.start() def _wake_up_timer(self, kill_event): \"\"\"Internal. This is the", "the thread will execute. 
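
# A hedged usage sketch for BaseStrategy (illustrative only, not part of the
# original module): a subclass overrides strategize() to receive the buffered
# events, and the Interchange drives it via notify(). Shown commented out so
# that importing this module stays side-effect free.
#
#   class LoggingStrategy(BaseStrategy):
#       def strategize(self, tasks=None, kind=None):
#           # Called either when `threshold` events accumulate or when the
#           # `interval` timer expires.
#           logger.info("Flushing {} events (trigger: {})".format(len(tasks), kind))
#
#   strategy = LoggingStrategy(threshold=3, interval=5)
#   for event_id in range(3):
#       strategy.notify(event_id)  # the third notify() hits the threshold
#                                  # and flushes the buffer via strategize()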


class Timer(object):
    """This timer is a simplified version of the FlowControl timer.
    This timer does not employ notify events.

    This is based on the following logic:

    .. code-block:: none

        BEGIN (INTERVAL, THRESHOLD, callback) :
            start = current_time()
            while (current_time() - start < INTERVAL) :
                wait()
                break
            callback()
    """

    def __init__(self, callback, *args, interval=5):
        """Initialize the timer object. The timer thread is started here.

        Args:
            - callback (callable) : Function invoked each time the timer fires

        KWargs:
            - interval (int) : Seconds after which the timer expires
        """
        self.interval = interval
        self.cb_args = args
        self.callback = callback
        self._wake_up_time = time.time() + 1
        self._kill_event = threading.Event()
        self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
        self._thread.daemon = True
        self._thread.start()

    def _wake_up_timer(self, kill_event):
        """Internal. This is the function that the thread will execute.

        Waits on an event so that the thread can make a quick exit when
        close() is called.

        Args:
            - kill_event (threading.Event) : Event to wait on
        """
        # Sleep till it is time to wake up
        while True:
            prev = self._wake_up_time

            # Waiting for the event returns True only when the event
            # is set, usually by the parent thread
            time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))

            if time_to_die:
                return

            if prev == self._wake_up_time:
                self.make_callback(kind='timer')
            else:
                logger.debug("Sleeping a bit more")

    def make_callback(self, kind=None):
        """Makes the callback and resets the timer."""
        self._wake_up_time = time.time() + self.interval
        self.callback(*self.cb_args)

    def close(self):
        """Merge the threads and terminate."""
        self._kill_event.set()
        self._thread.join()
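

# A minimal usage sketch for the Timer above (illustrative, not part of the
# original module): the callback fires roughly every `interval` seconds until
# close() is called. The 'heartbeat' name is hypothetical.
if __name__ == "__main__":
    def heartbeat():
        # Invoked by the Timer's background thread on each expiry.
        print("Timer fired at {:.1f}".format(time.time()))

    t = Timer(heartbeat, interval=2)
    time.sleep(5)  # let the timer fire a couple of times
    t.close()      # set the kill event and join the background thread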


import torch
import torchvision
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from captum.attr import GuidedGradCam, GuidedBackprop
from captum.attr import LayerActivation, LayerConductance, LayerGradCam

from data_utils import *
from image_utils import *
from captum_utils import *
from visualizers import GradCam

plt.rcParams['figure.figsize'] = (10.0, 8.0)  # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

X, y, class_names = load_imagenet_val(num=5)

# FOR THIS SECTION ONLY, we need to use gradients. We introduce a new model
# we will use explicitly for GradCAM for this.
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
gc = GradCam()

X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)

# Guided Back-Propagation
gbp_result = gc.guided_backprop(X_tensor, y_tensor, gc_model)

plt.figure(figsize=(24, 24))
for i in range(gbp_result.shape[0]):
    plt.subplot(1, 5, i + 1)
    img = gbp_result[i]
    img = rescale(img)
    plt.imshow(img)
    plt.title(class_names[y[i]])
    plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_backprop.png')

# GradCAM. We have given you the module (=layer) we need to capture gradients
# from; you can see it in the conv_module variable below.
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
for param in gc_model.parameters():
    param.requires_grad = True

X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)

plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
    gradcam_val = gradcam_result[i]
    img = X[i] + (matplotlib.cm.jet(gradcam_val)[:, :, :3] * 255)
    img = img / np.max(img)
    plt.subplot(1, 5, i + 1)
    plt.imshow(img)
    plt.title(class_names[y[i]])
    plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/gradcam.png')

# As a final step, we can combine GradCAM and Guided Backprop to get Guided GradCAM.
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)
gbp_result = gc.guided_backprop(X_tensor, y_tensor, gc_model)

plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
    gbp_val = gbp_result[i]
    gradcam_val = np.expand_dims(gradcam_result[i], axis=2)

    # Pointwise multiplication and normalization of the gradcam and guided
    # backprop results (2 lines)
    img = gradcam_val * gbp_val
    img = np.expand_dims(img.transpose(2, 0, 1), axis=0)

    img = np.float32(img)
    img = torch.from_numpy(img)
    img = deprocess(img)
    plt.subplot(1, 5, i + 1)
    plt.imshow(img)
    plt.title(class_names[y[i]])
    plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_gradcam.png')

# ****************************************************************************
# Captum
model = torchvision.models.squeezenet1_1(pretrained=True)

# We don't want to train the model, so tell PyTorch not to compute gradients
# with respect to model parameters.
for param in model.parameters():
    param.requires_grad = False

# Convert X and y from numpy arrays to Torch Tensors
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)
y_tensor = torch.LongTensor(y)

conv_module = model.features[12]

##############################################################################
# TODO: Compute/Visualize GuidedBackprop and Guided GradCAM as well.         #
#       The visualize_attr_maps function from captum_utils.py is useful for  #
#       visualizing captum outputs.                                          #
#       Use conv_module as the convolution layer for gradcam.                #
##############################################################################
# Computing Guided GradCAM
ggc = GuidedGradCam(model, conv_module)
attribution_gcc = compute_attributions(ggc, X_tensor, target=y_tensor)
# print(X_tensor.shape, y_tensor.shape, attribution_gcc.shape)
visualize_attr_maps('visualization/GuidedGradCam.png', X, y, class_names,
                    [attribution_gcc], ['Guided_Grad_Cam'])

# Computing Guided BackProp
gbp = GuidedBackprop(model)
attribution_gbp = compute_attributions(gbp, X_tensor, target=y_tensor)
visualize_attr_maps('visualization/GuidedBackpropCam.png', X, y, class_names,
                    [attribution_gbp], ['Guided_Backprop_Cam'])
##############################################################################
#                             END OF YOUR CODE                               #
##############################################################################

# Try out different layers and observe how the attributions change
layer = model.features[3]

# Example visualization for using layer visualizations
# layer_act = LayerActivation(model, layer)
# layer_act_attr = compute_attributions(layer_act, X_tensor)
# layer_act_attr_sum = layer_act_attr.mean(axis=1, keepdim=True)

##############################################################################
# TODO: Visualize Individual Layer Gradcam and Layer Conductance (similar    #
# to what we did for the other captum sections, using our helper methods),   #
# but with some preprocessing calculations.                                  #
#                                                                            #
# You can refer to the LayerActivation example above, and you should be      #
# using the 'layer' given above for this section.                            #
#                                                                            #
# Also note that you would need to customize the 'attr_preprocess'           #
# parameter that you send along to 'visualize_attr_maps', as the default     #
# 'attr_preprocess' is written to handle only multi-channel attributions.    #
#                                                                            #
# For layer gradcam, look at the usage of the parameter relu_attributions.   #
##############################################################################
# Layer gradcam aggregates across all channels
from captum.attr import LayerAttribution

N, C, H, W = X_tensor.shape

LC = LayerConductance(model, layer)
LC_attr = compute_attributions(LC, X_tensor, target=y_tensor)
LC_attr_sum = LC_attr.mean(axis=1, keepdim=True)
LC_attr_int = LayerAttribution.interpolate(LC_attr_sum, (H, W))
LC_attr_int = LC_attr_int.repeat(1, 3, 1, 1)
visualize_attr_maps('visualization/LayerConductance.png', X, y, class_names,
                    [LC_attr_int], ['LayerConductance'])

LGC = LayerGradCam(model, layer)
LGC_attr = compute_attributions(LGC, X_tensor, target=y_tensor)
LGC_attr_sum = LGC_attr.mean(axis=1, keepdim=True)
LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H, W))
LGC_attr_int = LGC_attr_int.repeat(1, 3, 1, 1)
visualize_attr_maps('visualization/LayerGradCam.png', X, y, class_names,
                    [LGC_attr_int], ['LayerGradCam'])
##############################################################################
#                             END OF YOUR CODE                               #
##############################################################################
# #", "we need to capture gradients from, which you can see", "[LC_attr_int], ['LayerConductance']) LGC = LayerGradCam(model, layer) LGC_attr = compute_attributions(LGC, X_tensor,", "Guided BackProp gbp = GuidedBackprop(model) attribution_gbp = compute_attributions(gbp, X_tensor, target", "# using 'layer' given above for this section # #", "gbp_val img = np.expand_dims(img.transpose(2, 0, 1), axis=0) img = np.float32(img)", "function from captum_utils.py is useful for # # visualizing captum", "backprop results (2 lines) img = gradcam_val * gbp_val img", "Compute/Visualize GuidedBackprop and Guided GradCAM as well. # # visualize_attr_maps", "# GradCAM. We have given you which module(=layer) that we", "= compute_attributions(layer_act, X_tensor) # layer_act_attr_sum = layer_act_attr.mean(axis=1, keepdim=True) ############################################################################## #", "y from numpy arrays to Torch Tensors X_tensor = torch.cat([preprocess(Image.fromarray(x))", "1, keepdim = True) LC_attr_int = LayerAttribution.interpolate(LC_attr_sum, (H,W) ) LC_attr_int", "for param in model.parameters(): param.requires_grad = False # Convert X", "captum outputs # # Use conv_module as the convolution layer", "for param in gc_model.parameters(): param.requires_grad = True X_tensor = torch.cat([preprocess(Image.fromarray(x))", "True X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True) y_tensor", "you send along to 'visualize_attr_maps' as the default # #", "plt.savefig('visualization/guided_gradcam.png') # **************************************************************************************** # # Captum model = torchvision.models.squeezenet1_1(pretrained=True) #", "= X_tensor.shape LC = LayerConductance(model, layer) LC_attr = compute_attributions(LC, X_tensor,", "# # Also note that, you would need to customize", "default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray'", "capture gradients from, which you can see in conv_module variable", "(similar # # to what we did for the other", "variable below gc_model = torchvision.models.squeezenet1_1(pretrained=True) for param in gc_model.parameters(): param.requires_grad", "For layer gradcam look at the usage of the parameter", "attribution_gbp = compute_attributions(gbp, X_tensor, target = y_tensor) visualize_attr_maps('visualization/GuidedBackpropCam.png', X, y,", "a new model we will use explicitly for GradCAM for", "(matplotlib.cm.jet(gradcam_val)[:,:,:3]*255) img = img / np.max(img) plt.subplot(1, 5, i +", "# ############################################################################## # Layer gradcam aggregates across all channels from", "i in range(gradcam_result.shape[0]): gbp_val = gbp_result[i] gradcam_val = np.expand_dims(gradcam_result[i], axis=2)", "Backprop to get Guided GradCam. X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x", "target = y_tensor) visualize_attr_maps('visualization/GuidedBackpropCam.png', X, y, class_names, [attribution_gbp], ['Guided_Backprop_Cam']) ##############################################################################", "torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True) y_tensor = torch.LongTensor(y) #", "LC_attr = compute_attributions(LC, X_tensor, target = y_tensor) LC_attr_sum = LC_attr.mean(axis", "model parameters. 
for param in model.parameters(): param.requires_grad = False #", "X and y from numpy arrays to Torch Tensors X_tensor", "np from visualizers import GradCam plt.rcParams['figure.figsize'] = (10.0, 8.0) #", "plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots", "and see observe how the attributions change layer = model.features[3]", "[attribution_gcc], ['Guided_Grad_Cam']) # Computing Guided BackProp gbp = GuidedBackprop(model) attribution_gbp", "y_tensor = torch.LongTensor(y) gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model) plt.figure(figsize=(24, 24))", "captum.attr import GuidedGradCam, GuidedBackprop from captum.attr import LayerActivation, LayerConductance, LayerGradCam", "# You can refer to the LayerActivation example above and", "set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] =", "LC_attr_int = LC_attr_int.repeat(1, 3, 1, 1) visualize_attr_maps('visualization/LayerConductance.png', X, y, class_names,", "Layer Gradcam and Layer Conductance (similar # # to what", "= torchvision.models.squeezenet1_1(pretrained=True) for param in gc_model.parameters(): param.requires_grad = True X_tensor", "# layer_act_attr_sum = layer_act_attr.mean(axis=1, keepdim=True) ############################################################################## # TODO: Visualize Individual", "# # You can refer to the LayerActivation example above", "multiplication and normalization of the gradcam and guided backprop results", "THIS SECTION ONLY, we need to use gradients. We introduce", "visualizers import GradCam plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default", "outputs # # Use conv_module as the convolution layer for", "target = y_tensor) LC_attr_sum = LC_attr.mean(axis = 1, keepdim =", "y, class_names, [attribution_gbp], ['Guided_Backprop_Cam']) ############################################################################## # END OF YOUR CODE", "# # visualize_attr_maps function from captum_utils.py is useful for #", "as the convolution layer for gradcam # ############################################################################## # Computing", "Torch Tensors X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)", "# layer_act_attr = compute_attributions(layer_act, X_tensor) # layer_act_attr_sum = layer_act_attr.mean(axis=1, keepdim=True)", "the convolution layer for gradcam # ############################################################################## # Computing Guided", "GuidedBackprop(model) attribution_gbp = compute_attributions(gbp, X_tensor, target = y_tensor) visualize_attr_maps('visualization/GuidedBackpropCam.png', X,", "we did for the other captum sections, using our helper", "lines) img = gradcam_val * gbp_val img = np.expand_dims(img.transpose(2, 0,", "= torch.LongTensor(y) conv_module = model.features[12] ############################################################################## # TODO: Compute/Visualize GuidedBackprop", "# As a final step, we can combine GradCam and", "for i in range(gradcam_result.shape[0]): gbp_val = gbp_result[i] gradcam_val = np.expand_dims(gradcam_result[i],", "y_tensor = torch.LongTensor(y) conv_module = model.features[12] ############################################################################## # TODO: Compute/Visualize", "TODO: Compute/Visualize GuidedBackprop and Guided GradCAM as well. 
# #", "You can refer to the LayerActivation example above and you", "# visualize_attr_maps function from captum_utils.py is useful for # #", "= img / np.max(img) plt.subplot(1, 5, i + 1) plt.imshow(img)", "to model parameters. for param in model.parameters(): param.requires_grad = False", "y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i in range(gradcam_result.shape[0]): gradcam_val =", "= LayerActivation(model, layer) # layer_act_attr = compute_attributions(layer_act, X_tensor) # layer_act_attr_sum", "# # visualizing captum outputs # # Use conv_module as", "Guided GradCam ggc = GuidedGradCam(model, conv_module) attribution_gcc = compute_attributions(ggc, X_tensor,", "in conv_module variable below gc_model = torchvision.models.squeezenet1_1(pretrained=True) for param in", "GradCam. X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True) y_tensor", "1, 1) visualize_attr_maps ('visualization/LayerGradCam.png', X, y, class_names, [LGC_attr_int], ['LayerGradCam']) ##############################################################################", "parameters. for param in model.parameters(): param.requires_grad = False # Convert", "= load_imagenet_val(num=5) # FOR THIS SECTION ONLY, we need to", "gc.guided_backprop(X_tensor, y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i in range(gradcam_result.shape[0]): gbp_val", "import LayerActivation, LayerConductance, LayerGradCam from data_utils import * from image_utils", "layer_act = LayerActivation(model, layer) # layer_act_attr = compute_attributions(layer_act, X_tensor) #", "only to handle multi channel attributions. # # # #", "import numpy as np from visualizers import GradCam plt.rcParams['figure.figsize'] =", "compute_attributions(ggc, X_tensor, target = y_tensor) # print(X_tensor.shape, y_tensor.shape, attribution_gcc.shape) visualize_attr_maps('visualization/GuidedGradCam.png',", "gc.grad_cam(X_tensor, y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i in range(gradcam_result.shape[0]): gradcam_val", "the parameter relu_attributions # ############################################################################## # Layer gradcam aggregates across", "can combine GradCam and Guided Backprop to get Guided GradCam.", "use gradients. We introduce a new model we will use", "GradCam() X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True) y_tensor", "plt from PIL import Image from captum.attr import GuidedGradCam, GuidedBackprop", "gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model) gbp_result = gc.guided_backprop(X_tensor, y_tensor, gc_model)", "image_utils import * from captum_utils import * import numpy as", "for # # visualizing captum outputs # # Use conv_module", "some preprocessing calculations. # # # # You can refer", "layer) LGC_attr = compute_attributions(LGC, X_tensor, target = y_tensor) LGC_attr_sum =", "GuidedBackprop and Guided GradCAM as well. # # visualize_attr_maps function", "FOR THIS SECTION ONLY, we need to use gradients. We", "convolution layer for gradcam # ############################################################################## # Computing Guided GradCam", "you can see in conv_module variable below gc_model = torchvision.models.squeezenet1_1(pretrained=True)", "'gray' X, y, class_names = load_imagenet_val(num=5) # FOR THIS SECTION", "# GradCam # GradCAM. We have given you which module(=layer)", "to only to handle multi channel attributions. 
# # #", "'nearest' plt.rcParams['image.cmap'] = 'gray' X, y, class_names = load_imagenet_val(num=5) #", "i + 1) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off') plt.gcf().tight_layout() plt.savefig('visualization/guided_gradcam.png') # ****************************************************************************************", "in model.parameters(): param.requires_grad = False # Convert X and y", "img = rescale(img) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off') plt.gcf().tight_layout() plt.savefig('visualization/guided_backprop.png') # GradCam", "= gc.grad_cam(X_tensor, y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i in range(gradcam_result.shape[0]):", "False # Convert X and y from numpy arrays to", "dim=0).requires_grad_(True) y_tensor = torch.LongTensor(y) gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model) plt.figure(figsize=(24,", "LGC_attr.mean(axis = 1, keepdim = True) LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H,W))", "= gradcam_result[i] img = X[i] + (matplotlib.cm.jet(gradcam_val)[:,:,:3]*255) img = img", "parameter relu_attributions # ############################################################################## # Layer gradcam aggregates across all", "i + 1) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off') plt.gcf().tight_layout() plt.savefig('visualization/gradcam.png') # As", "= LayerGradCam(model, layer) LGC_attr = compute_attributions(LGC, X_tensor, target = y_tensor)", "need to capture gradients from, which you can see in", "YOUR CODE # ############################################################################## # Try out different layers and", "= np.expand_dims(img.transpose(2, 0, 1), axis=0) img = np.float32(img) img =", "model.features[3] # Example visualization for using layer visualizations # layer_act", "import matplotlib.pyplot as plt from PIL import Image from captum.attr", "Tensors X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0) y_tensor", "keepdim=True) ############################################################################## # TODO: Visualize Individual Layer Gradcam and Layer", "get Guided GradCam. X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X],", "y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i in range(gradcam_result.shape[0]): gbp_val =", "numpy as np from visualizers import GradCam plt.rcParams['figure.figsize'] = (10.0,", "new model we will use explicitly for GradCAM for this.", "LGC_attr_sum = LGC_attr.mean(axis = 1, keepdim = True) LGC_attr_int =", "plt.subplot(1, 5, i + 1) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off') plt.gcf().tight_layout() plt.savefig('visualization/guided_gradcam.png')", "gc_model = torchvision.models.squeezenet1_1(pretrained=True) for param in gc_model.parameters(): param.requires_grad = True", "3, 1, 1) visualize_attr_maps ('visualization/LayerGradCam.png', X, y, class_names, [LGC_attr_int], ['LayerGradCam'])", "preprocessing calculations. 
# # # # You can refer to", "y, class_names, [LC_attr_int], ['LayerConductance']) LGC = LayerGradCam(model, layer) LGC_attr =", "= torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0) y_tensor = torch.LongTensor(y)", "from captum.attr import LayerActivation, LayerConductance, LayerGradCam from data_utils import *", "the gradcam and guided backprop results (2 lines) img =", "keepdim = True) LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H,W)) LGC_attr_int = LGC_attr_int.repeat(1,", "import matplotlib import matplotlib.pyplot as plt from PIL import Image", "X[i] + (matplotlib.cm.jet(gradcam_val)[:,:,:3]*255) img = img / np.max(img) plt.subplot(1, 5,", "in range(gbp_result.shape[0]): plt.subplot(1, 5, i + 1) img = gbp_result[i]", "of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' X, y,", "############################################################################## # END OF YOUR CODE # ############################################################################## # Try", "class_names, [LC_attr_int], ['LayerConductance']) LGC = LayerGradCam(model, layer) LGC_attr = compute_attributions(LGC,", "# # 'attr_preprocess' is written to only to handle multi", "OF YOUR CODE # ############################################################################## # Try out different layers", "plt.subplot(1, 5, i + 1) img = gbp_result[i] img =", "to customize your 'attr_preprocess' # # parameter that you send", "visualizations # layer_act = LayerActivation(model, layer) # layer_act_attr = compute_attributions(layer_act,", "i in range(gbp_result.shape[0]): plt.subplot(1, 5, i + 1) img =", "# # # For layer gradcam look at the usage", "gc_model = torchvision.models.squeezenet1_1(pretrained=True) gc = GradCam() X_tensor = torch.cat([preprocess(Image.fromarray(x)) for", "gc_model) plt.figure(figsize=(24, 24)) for i in range(gbp_result.shape[0]): plt.subplot(1, 5, i", "= deprocess(img) plt.subplot(1, 5, i + 1) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off')", "in X], dim=0).requires_grad_(True) y_tensor = torch.LongTensor(y) gradcam_result = gc.grad_cam(X_tensor, y_tensor,", "+ 1) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off') plt.gcf().tight_layout() plt.savefig('visualization/gradcam.png') # As a", "captum.attr import LayerActivation, LayerConductance, LayerGradCam from data_utils import * from", "did for the other captum sections, using our helper methods),", "plt.axis('off') plt.gcf().tight_layout() plt.savefig('visualization/guided_gradcam.png') # **************************************************************************************** # # Captum model =", "gbp_result = gc.guided_backprop(X_tensor,y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i in range(gbp_result.shape[0]):", "Layer gradcam aggregates across all channels from captum.attr import LayerAttribution", "as plt from PIL import Image from captum.attr import GuidedGradCam,", "param in gc_model.parameters(): param.requires_grad = True X_tensor = torch.cat([preprocess(Image.fromarray(x)) for", "Pointwise multiplication and normalization of the gradcam and guided backprop", "observe how the attributions change layer = model.features[3] # Example", "layer visualizations # layer_act = LayerActivation(model, layer) # layer_act_attr =", "parameter that you send along to 'visualize_attr_maps' as the default", "you which module(=layer) that we need to capture gradients from,", "1, keepdim = True) LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H,W)) 
LGC_attr_int =", "and you should be # # using 'layer' given above", "PIL import Image from captum.attr import GuidedGradCam, GuidedBackprop from captum.attr", "= True X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)", "X_tensor.shape LC = LayerConductance(model, layer) LC_attr = compute_attributions(LC, X_tensor, target", "this. gc_model = torchvision.models.squeezenet1_1(pretrained=True) gc = GradCam() X_tensor = torch.cat([preprocess(Image.fromarray(x))", "gbp_result[i] img = rescale(img) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off') plt.gcf().tight_layout() plt.savefig('visualization/guided_backprop.png') #", "torch.LongTensor(y) gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i", "can refer to the LayerActivation example above and you should", "to use gradients. We introduce a new model we will", "= model.features[3] # Example visualization for using layer visualizations #", "you would need to customize your 'attr_preprocess' # # parameter", "'attr_preprocess' # # parameter that you send along to 'visualize_attr_maps'", "# TODO: Compute/Visualize GuidedBackprop and Guided GradCAM as well. #", "should be # # using 'layer' given above for this", "np.expand_dims(gradcam_result[i], axis=2) # Pointwise multiplication and normalization of the gradcam", "= compute_attributions(LGC, X_tensor, target = y_tensor) LGC_attr_sum = LGC_attr.mean(axis =", "ggc = GuidedGradCam(model, conv_module) attribution_gcc = compute_attributions(ggc, X_tensor, target =", "# FOR THIS SECTION ONLY, we need to use gradients.", "# 'attr_preprocess' is written to only to handle multi channel", "img = img / np.max(img) plt.subplot(1, 5, i + 1)", "gradients from, which you can see in conv_module variable below", "X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0) y_tensor =", "# print(X_tensor.shape, y_tensor.shape, attribution_gcc.shape) visualize_attr_maps('visualization/GuidedGradCam.png', X, y, class_names, [attribution_gcc], ['Guided_Grad_Cam'])", "True) LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H,W)) LGC_attr_int = LGC_attr_int.repeat(1, 3, 1,", "= LGC_attr_int.repeat(1, 3, 1, 1) visualize_attr_maps ('visualization/LayerGradCam.png', X, y, class_names,", "X, y, class_names, [attribution_gbp], ['Guided_Backprop_Cam']) ############################################################################## # END OF YOUR", "range(gradcam_result.shape[0]): gradcam_val = gradcam_result[i] img = X[i] + (matplotlib.cm.jet(gradcam_val)[:,:,:3]*255) img", "to handle multi channel attributions. 
# # # # For", "gradcam aggregates across all channels from captum.attr import LayerAttribution N,", "= True) LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H,W)) LGC_attr_int = LGC_attr_int.repeat(1, 3,", "= model.features[12] ############################################################################## # TODO: Compute/Visualize GuidedBackprop and Guided GradCAM", "the default # # 'attr_preprocess' is written to only to", "sections, using our helper methods), # # but with some", "plt.title(class_names[y[i]]) plt.axis('off') plt.gcf().tight_layout() plt.savefig('visualization/gradcam.png') # As a final step, we", "1) img = gbp_result[i] img = rescale(img) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off')", "= GuidedBackprop(model) attribution_gbp = compute_attributions(gbp, X_tensor, target = y_tensor) visualize_attr_maps('visualization/GuidedBackpropCam.png',", "from data_utils import * from image_utils import * from captum_utils", "import Image from captum.attr import GuidedGradCam, GuidedBackprop from captum.attr import", "* import numpy as np from visualizers import GradCam plt.rcParams['figure.figsize']", "x in X], dim=0).requires_grad_(True) y_tensor = torch.LongTensor(y) gradcam_result = gc.grad_cam(X_tensor,", "img = deprocess(img) plt.subplot(1, 5, i + 1) plt.imshow(img) plt.title(class_names[y[i]])", "= y_tensor) LC_attr_sum = LC_attr.mean(axis = 1, keepdim = True)", "LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H,W)) LGC_attr_int = LGC_attr_int.repeat(1, 3, 1, 1)", "gc_model.parameters(): param.requires_grad = True X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in", "class_names, [LGC_attr_int], ['LayerGradCam']) ############################################################################## # END OF YOUR CODE #", "'attr_preprocess' is written to only to handle multi channel attributions.", "this section # # # # Also note that, you", "Use conv_module as the convolution layer for gradcam # ##############################################################################", "GradCAM for this. gc_model = torchvision.models.squeezenet1_1(pretrained=True) gc = GradCam() X_tensor", "LC_attr_int.repeat(1, 3, 1, 1) visualize_attr_maps('visualization/LayerConductance.png', X, y, class_names, [LC_attr_int], ['LayerConductance'])", "y, class_names, [attribution_gcc], ['Guided_Grad_Cam']) # Computing Guided BackProp gbp =", "don't want to train the model, so tell PyTorch not", "guided backprop results (2 lines) img = gradcam_val * gbp_val", "[LGC_attr_int], ['LayerGradCam']) ############################################################################## # END OF YOUR CODE # ##############################################################################", "Image from captum.attr import GuidedGradCam, GuidedBackprop from captum.attr import LayerActivation,", "torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True) y_tensor = torch.LongTensor(y) gradcam_result", "model, so tell PyTorch not to compute gradients # with", "with respect to model parameters. 
for param in model.parameters(): param.requires_grad", "LayerAttribution.interpolate(LC_attr_sum, (H,W) ) LC_attr_int = LC_attr_int.repeat(1, 3, 1, 1) visualize_attr_maps('visualization/LayerConductance.png',", "to Torch Tensors X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X],", "using our helper methods), # # but with some preprocessing", "using layer visualizations # layer_act = LayerActivation(model, layer) # layer_act_attr", "**************************************************************************************** # # Captum model = torchvision.models.squeezenet1_1(pretrained=True) # We don't", "the usage of the parameter relu_attributions # ############################################################################## # Layer", "we will use explicitly for GradCAM for this. gc_model =", "from captum.attr import GuidedGradCam, GuidedBackprop from captum.attr import LayerActivation, LayerConductance,", "target = y_tensor) LGC_attr_sum = LGC_attr.mean(axis = 1, keepdim =", "plt.figure(figsize=(24, 24)) for i in range(gradcam_result.shape[0]): gbp_val = gbp_result[i] gradcam_val", "for this section # # # # Also note that,", "= gc.grad_cam(X_tensor, y_tensor, gc_model) gbp_result = gc.guided_backprop(X_tensor, y_tensor, gc_model) plt.figure(figsize=(24,", "Guided GradCam. X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)", "gradcam_result[i] img = X[i] + (matplotlib.cm.jet(gradcam_val)[:,:,:3]*255) img = img /", "ONLY, we need to use gradients. We introduce a new", "given you which module(=layer) that we need to capture gradients", "in X], dim=0).requires_grad_(True) y_tensor = torch.LongTensor(y) # Guided Back-Propagation gbp_result", "captum.attr import LayerAttribution N, C, H, W = X_tensor.shape LC", "default # # 'attr_preprocess' is written to only to handle", "# # # Also note that, you would need to", "different layers and see observe how the attributions change layer", "from, which you can see in conv_module variable below gc_model", "C, H, W = X_tensor.shape LC = LayerConductance(model, layer) LC_attr", "True) LC_attr_int = LayerAttribution.interpolate(LC_attr_sum, (H,W) ) LC_attr_int = LC_attr_int.repeat(1, 3,", "############################################################################## # TODO: Compute/Visualize GuidedBackprop and Guided GradCAM as well.", "gc_model) plt.figure(figsize=(24, 24)) for i in range(gradcam_result.shape[0]): gradcam_val = gradcam_result[i]", "y_tensor) # print(X_tensor.shape, y_tensor.shape, attribution_gcc.shape) visualize_attr_maps('visualization/GuidedGradCam.png', X, y, class_names, [attribution_gcc],", "# # using 'layer' given above for this section #", "need to customize your 'attr_preprocess' # # parameter that you", "= np.float32(img) img = torch.from_numpy(img) img = deprocess(img) plt.subplot(1, 5,", "example above and you should be # # using 'layer'", "axis=0) img = np.float32(img) img = torch.from_numpy(img) img = deprocess(img)", "# # # # For layer gradcam look at the", "for the other captum sections, using our helper methods), #", "helper methods), # # but with some preprocessing calculations. 
#", "gradcam and guided backprop results (2 lines) img = gradcam_val", "gc_model) gbp_result = gc.guided_backprop(X_tensor, y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i", "img = np.expand_dims(img.transpose(2, 0, 1), axis=0) img = np.float32(img) img", "y_tensor) visualize_attr_maps('visualization/GuidedBackpropCam.png', X, y, class_names, [attribution_gbp], ['Guided_Backprop_Cam']) ############################################################################## # END", "channels from captum.attr import LayerAttribution N, C, H, W =", "usage of the parameter relu_attributions # ############################################################################## # Layer gradcam", "X_tensor, target = y_tensor) visualize_attr_maps('visualization/GuidedBackpropCam.png', X, y, class_names, [attribution_gbp], ['Guided_Backprop_Cam'])", "tell PyTorch not to compute gradients # with respect to", "Try out different layers and see observe how the attributions", "Guided GradCAM as well. # # visualize_attr_maps function from captum_utils.py", "# # # You can refer to the LayerActivation example", "############################################################################## # TODO: Visualize Individual Layer Gradcam and Layer Conductance", "= compute_attributions(LC, X_tensor, target = y_tensor) LC_attr_sum = LC_attr.mean(axis =", "# # to what we did for the other captum", "to get Guided GradCam. X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in", "LayerActivation(model, layer) # layer_act_attr = compute_attributions(layer_act, X_tensor) # layer_act_attr_sum =", "plt.title(class_names[y[i]]) plt.axis('off') plt.gcf().tight_layout() plt.savefig('visualization/guided_gradcam.png') # **************************************************************************************** # # Captum model", "that, you would need to customize your 'attr_preprocess' # #", "from captum_utils import * import numpy as np from visualizers", "img = X[i] + (matplotlib.cm.jet(gradcam_val)[:,:,:3]*255) img = img / np.max(img)", "to compute gradients # with respect to model parameters. for", ") LC_attr_int = LC_attr_int.repeat(1, 3, 1, 1) visualize_attr_maps('visualization/LayerConductance.png', X, y,", "# We don't want to train the model, so tell", "for gradcam # ############################################################################## # Computing Guided GradCam ggc =", "= y_tensor) LGC_attr_sum = LGC_attr.mean(axis = 1, keepdim = True)", "plt.savefig('visualization/guided_backprop.png') # GradCam # GradCAM. We have given you which", "Captum model = torchvision.models.squeezenet1_1(pretrained=True) # We don't want to train", "explicitly for GradCAM for this. 
gc_model = torchvision.models.squeezenet1_1(pretrained=True) gc =", "train the model, so tell PyTorch not to compute gradients", "for i in range(gbp_result.shape[0]): plt.subplot(1, 5, i + 1) img", "# ############################################################################## # Computing Guided GradCam ggc = GuidedGradCam(model, conv_module)", "be # # using 'layer' given above for this section", "np.float32(img) img = torch.from_numpy(img) img = deprocess(img) plt.subplot(1, 5, i", "We have given you which module(=layer) that we need to", "['Guided_Grad_Cam']) # Computing Guided BackProp gbp = GuidedBackprop(model) attribution_gbp =", "= 1, keepdim = True) LC_attr_int = LayerAttribution.interpolate(LC_attr_sum, (H,W) )", "to train the model, so tell PyTorch not to compute", "= gbp_result[i] gradcam_val = np.expand_dims(gradcam_result[i], axis=2) # Pointwise multiplication and", "import LayerAttribution N, C, H, W = X_tensor.shape LC =", "GradCAM. We have given you which module(=layer) that we need", "we need to use gradients. We introduce a new model", "compute_attributions(LGC, X_tensor, target = y_tensor) LGC_attr_sum = LGC_attr.mean(axis = 1,", "Back-Propagation gbp_result = gc.guided_backprop(X_tensor,y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i in", "param in model.parameters(): param.requires_grad = False # Convert X and", "import torch import torchvision import matplotlib import matplotlib.pyplot as plt", "torch import torchvision import matplotlib import matplotlib.pyplot as plt from", "class_names, [attribution_gbp], ['Guided_Backprop_Cam']) ############################################################################## # END OF YOUR CODE #", "and Guided GradCAM as well. # # visualize_attr_maps function from", "['Guided_Backprop_Cam']) ############################################################################## # END OF YOUR CODE # ############################################################################## #", "Visualize Individual Layer Gradcam and Layer Conductance (similar # #", "# # For layer gradcam look at the usage of", "torch.LongTensor(y) conv_module = model.features[12] ############################################################################## # TODO: Compute/Visualize GuidedBackprop and", "what we did for the other captum sections, using our", "5, i + 1) img = gbp_result[i] img = rescale(img)", "gc = GradCam() X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X],", "matplotlib import matplotlib.pyplot as plt from PIL import Image from", "print(X_tensor.shape, y_tensor.shape, attribution_gcc.shape) visualize_attr_maps('visualization/GuidedGradCam.png', X, y, class_names, [attribution_gcc], ['Guided_Grad_Cam']) #", "layer gradcam look at the usage of the parameter relu_attributions", "GradCAM as well. 
# # visualize_attr_maps function from captum_utils.py is", "+ 1) img = gbp_result[i] img = rescale(img) plt.imshow(img) plt.title(class_names[y[i]])", "across all channels from captum.attr import LayerAttribution N, C, H,", "plt.figure(figsize=(24, 24)) for i in range(gbp_result.shape[0]): plt.subplot(1, 5, i +", "= 1, keepdim = True) LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H,W)) LGC_attr_int", "/ np.max(img) plt.subplot(1, 5, i + 1) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off')", "# Guided Back-Propagation gbp_result = gc.guided_backprop(X_tensor,y_tensor, gc_model) plt.figure(figsize=(24, 24)) for", "gbp_val = gbp_result[i] gradcam_val = np.expand_dims(gradcam_result[i], axis=2) # Pointwise multiplication", "(2 lines) img = gradcam_val * gbp_val img = np.expand_dims(img.transpose(2,", "# ############################################################################## # Try out different layers and see observe", "Example visualization for using layer visualizations # layer_act = LayerActivation(model,", "layer_act_attr_sum = layer_act_attr.mean(axis=1, keepdim=True) ############################################################################## # TODO: Visualize Individual Layer", "of the parameter relu_attributions # ############################################################################## # Layer gradcam aggregates", "LayerActivation example above and you should be # # using", "np.max(img) plt.subplot(1, 5, i + 1) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off') plt.gcf().tight_layout()" ]
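# The helpers compute_attributions and visualize_attr_maps come from the
# provided captum_utils.py, which is not reproduced here. As a rough,
# hypothetical sketch (the real helper may differ), compute_attributions is
# assumed to simply forward to Captum's standard .attribute(...) entry point,
# which every Captum attribution object exposes:
#
# def compute_attributions(algo, inputs, **kwargs):
#     return algo.attribute(inputs, **kwargs)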
[ "5, 6], reduce=[ ('sep_conv_5x5', 1), ('sep_conv_7x7', 0), ('max_pool_3x3', 1), ('sep_conv_7x7',", "0), ('dil_conv_3x3', 1), ('skip_connect', 1), ('skip_connect', 0), ('dil_conv_5x5', 0), ('sep_conv_3x3',", "('skip_connect', 0), ('avg_pool_3x3', 0), ('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('skip_connect', 1),", "('sep_conv_3x3', 1), ('skip_connect', 1), ], normal_concat=[2, 3, 4, 5, 6],", "0), ('max_pool_3x3', 1), ('skip_connect', 0), ('dil_conv_3x3', 1), ('skip_connect', 1), ('skip_connect',", "1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2,", "('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5]) DARTS_V2 =", "1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3',", "normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2),", "Genotype( normal=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_5x5', 2), ('dil_conv_5x5',", "1), ('skip_connect', 0), ('avg_pool_3x3', 0), ('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('skip_connect',", "0), ('sep_conv_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_7x7', 2), ('sep_conv_7x7', 0), ('avg_pool_3x3',", "= [ 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ]", "0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect',", "('skip_connect', 2), ('max_pool_3x3', 1), ('skip_connect', 1), ('skip_connect', 2), ('max_pool_3x3', 1),", "6], reduce=[ ('sep_conv_5x5', 1), ('sep_conv_7x7', 0), ('max_pool_3x3', 1), ('sep_conv_7x7', 0),", "3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3',", "('sep_conv_3x3', 0), ('avg_pool_3x3', 1), ('skip_connect', 0), ('avg_pool_3x3', 0), ('avg_pool_3x3', 0),", "'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] NASNet =", "('max_pool_3x3', 1), ], reduce_concat=[4, 5, 6], ) AmoebaNet = Genotype(", "('skip_connect', 1), ('skip_connect', 0), ('avg_pool_3x3', 1), ], normal_concat=[4, 5, 6],", "0), ('sep_conv_7x7', 2), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3',", "DARTS_V2 = Genotype( normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3',", "3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 4), ('dil_conv_5x5', 1)], reduce_concat=range(2, 6)) BATH2", "('skip_connect', 0), ('dil_conv_3x3', 1), ('skip_connect', 1), ('skip_connect', 0), ('dil_conv_5x5', 0),", "0), ('sep_conv_5x5', 0), ('sep_conv_3x3', 0), ('avg_pool_3x3', 1), ('skip_connect', 0), ('avg_pool_3x3',", "('sep_conv_7x7', 0), ('avg_pool_3x3', 1), ('sep_conv_5x5', 0), ('skip_connect', 3), ('avg_pool_3x3', 2),", "('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2),", "'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] CRBPRIMITIVES = [ 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect',", "4, 5]) DARTS_V2 = Genotype( normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3',", "('max_pool_3x3', 1), ('sep_conv_5x5', 2), ('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 4),", "1), ('sep_conv_5x5', 0), ('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 2), ('max_pool_3x3',", "('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5]) DARTS_V2 = Genotype( normal=[('sep_conv_3x3',", "2), ('sep_conv_3x3', 2), ('sep_conv_3x3', 0)], normal_concat=range(2, 6), 
reduce=[('max_pool_3x3', 1), ('max_pool_3x3',", "1), ('skip_connect', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0)], normal_concat=range(2,", "normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 0), ('dil_conv_3x3', 1),", "0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_5x5',", "Genotype( normal=[ ('avg_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 2),", "normal_concat=range(2, 6), reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_5x5', 2),", "('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0),", "0), ('sep_conv_3x3', 0), ('avg_pool_3x3', 1), ('skip_connect', 0), ('avg_pool_3x3', 0), ('avg_pool_3x3',", "1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect',", "0), ('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('skip_connect', 1), ], normal_concat=[2, 3,", "4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0),", "4, 5]) DARTS = DARTS_V2 BATH = Genotype( normal=[('max_pool_3x3', 0),", "('avg_pool_3x3', 1), ('sep_conv_5x5', 0), ('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 2),", "('max_pool_3x3', 2), ('sep_conv_3x3', 2), ('sep_conv_3x3', 0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 1),", "('sep_conv_3x3', 0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1),", "('max_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_5x5', 2), ('skip_connect', 3), ('avg_pool_3x3', 2),", "1), ('sep_conv_5x5', 2), ('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 4), ('dil_conv_5x5',", "0), ('sep_conv_3x3', 1), ('skip_connect', 1), ], normal_concat=[2, 3, 4, 5,", "1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4,", "reduce_concat=[2, 3, 4, 5]) DARTS = DARTS_V2 BATH = Genotype(", "reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 0), ('dil_conv_3x3', 1), ('skip_connect', 1),", ") DARTS_V1 = Genotype( normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0),", "0), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2,", "= namedtuple('Genotype', 'normal normal_concat reduce reduce_concat') PRIMITIVES = [ 'none',", "('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)],", "= DARTS_V2 BATH = Genotype( normal=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('max_pool_3x3',", "reduce_concat') PRIMITIVES = [ 'none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5',", "'none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] CRBPRIMITIVES", "] CRBPRIMITIVES = [ 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3',", "('sep_conv_5x5', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 0), ('sep_conv_3x3', 0), ('avg_pool_3x3', 1),", "('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5])", "0), ('avg_pool_3x3', 1), ], normal_concat=[4, 5, 6], reduce=[ ('avg_pool_3x3', 0),", "0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3',", "('avg_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('conv_7x1_1x7', 0), ('sep_conv_3x3', 5),", "3, 4, 
5]) DARTS_V2 = Genotype( normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1),", "4, 5, 6], reduce=[ ('sep_conv_5x5', 1), ('sep_conv_7x7', 0), ('max_pool_3x3', 1),", "AmoebaNet = Genotype( normal=[ ('avg_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_3x3', 0),", "normal=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_5x5', 2), ('dil_conv_5x5', 0),", "import namedtuple Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat') PRIMITIVES", "0), ('max_pool_3x3', 1), ('sep_conv_5x5', 2), ('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3',", "('avg_pool_3x3', 2), ('sep_conv_3x3', 2), ('max_pool_3x3', 1), ], reduce_concat=[4, 5, 6],", "('dil_conv_3x3', 1), ('skip_connect', 1), ('skip_connect', 0), ('dil_conv_5x5', 0), ('sep_conv_3x3', 4)],", "= Genotype( normal=[ ('avg_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5',", "0), ('sep_conv_5x5', 2), ('sep_conv_3x3', 0), ('avg_pool_3x3', 3), ('sep_conv_3x3', 1), ('skip_connect',", "0), ('avg_pool_3x3', 1), ('sep_conv_5x5', 0), ('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3',", "3, 4, 5]) DARTS = DARTS_V2 BATH = Genotype( normal=[('max_pool_3x3',", "('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3,", "'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] CRBPRIMITIVES =", "'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] NASNet = Genotype( normal=[", "('max_pool_3x3', 0), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)],", "], reduce_concat=[4, 5, 6], ) AmoebaNet = Genotype( normal=[ ('avg_pool_3x3',", "('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1),", "5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3',", "('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3,", "('avg_pool_3x3', 3), ('sep_conv_3x3', 1), ('skip_connect', 1), ('skip_connect', 0), ('avg_pool_3x3', 1),", "namedtuple('Genotype', 'normal normal_concat reduce reduce_concat') PRIMITIVES = [ 'none', 'max_pool_3x3',", "reduce reduce_concat') PRIMITIVES = [ 'none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3',", "5, 6], ) AmoebaNet = Genotype( normal=[ ('avg_pool_3x3', 0), ('max_pool_3x3',", "'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] NASNet = Genotype(", "0), ('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4,", "[ 'none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ]", "('dil_conv_5x5', 1)], reduce_concat=range(2, 6)) BATH2 = Genotype( normal=[('max_pool_3x3', 1), ('skip_connect',", "= Genotype( normal=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_5x5', 2),", "= Genotype( normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1),", "1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect',", "2), ('sep_conv_3x3', 2), ('max_pool_3x3', 1), ], reduce_concat=[4, 5, 6], )", "], normal_concat=[2, 3, 4, 5, 6], reduce=[ ('sep_conv_5x5', 1), ('sep_conv_7x7',", "('sep_conv_5x5', 0), ('sep_conv_3x3', 0), ('avg_pool_3x3', 1), ('skip_connect', 0), ('avg_pool_3x3', 0),", "normal=[ ('avg_pool_3x3', 0), 
('max_pool_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 2), ('sep_conv_3x3',", "0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3, 4,", "2), ('dil_conv_5x5', 0), ('max_pool_3x3', 2), ('sep_conv_3x3', 2), ('sep_conv_3x3', 0)], normal_concat=range(2,", "2), ('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 4), ('dil_conv_5x5', 1)], reduce_concat=range(2,", "('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0),", "('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5]) DARTS =", "reduce_concat=range(2, 6)) BATH2 = Genotype( normal=[('max_pool_3x3', 1), ('skip_connect', 0), ('skip_connect',", "1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect',", "2), ('max_pool_3x3', 1), ('skip_connect', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3',", "'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] NASNet = Genotype( normal=[ ('sep_conv_5x5',", "normal_concat=[4, 5, 6], reduce=[ ('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('max_pool_3x3', 0),", "reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0),", "('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_7x7', 2), ('sep_conv_7x7', 0),", "0), ('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 2), ('max_pool_3x3', 1), ],", "('sep_conv_7x7', 0), ('avg_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('conv_7x1_1x7', 0),", "('sep_conv_3x3', 0), ('sep_conv_5x5', 0), ('sep_conv_3x3', 0), ('avg_pool_3x3', 1), ('skip_connect', 0),", "('sep_conv_5x5', 1), ('sep_conv_7x7', 0), ('max_pool_3x3', 1), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1),", "('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0),", "0), ('max_pool_3x3', 1), ('conv_7x1_1x7', 0), ('sep_conv_3x3', 5), ], reduce_concat=[3, 4,", "('max_pool_3x3', 1), ('skip_connect', 0), ('dil_conv_3x3', 1), ('skip_connect', 1), ('skip_connect', 0),", "1), ('skip_connect', 1), ('skip_connect', 0), ('dil_conv_5x5', 0), ('sep_conv_3x3', 4)], reduce_concat=range(2,", "('sep_conv_3x3', 2), ('max_pool_3x3', 1), ], reduce_concat=[4, 5, 6], ) AmoebaNet", "('avg_pool_3x3', 1), ('skip_connect', 0), ('avg_pool_3x3', 0), ('avg_pool_3x3', 0), ('sep_conv_3x3', 1),", "5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3',", "4), ('dil_conv_5x5', 1)], reduce_concat=range(2, 6)) BATH2 = Genotype( normal=[('max_pool_3x3', 1),", "1), ('max_pool_3x3', 0), ('sep_conv_5x5', 2), ('dil_conv_5x5', 0), ('max_pool_3x3', 2), ('sep_conv_3x3',", "('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 4), ('dil_conv_5x5', 1)], reduce_concat=range(2, 6))", "('max_pool_3x3', 0), ('sep_conv_5x5', 2), ('dil_conv_5x5', 0), ('max_pool_3x3', 2), ('sep_conv_3x3', 2),", "'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] CRBPRIMITIVES = [", "5), ], reduce_concat=[3, 4, 6] ) DARTS_V1 = Genotype( normal=[('sep_conv_3x3',", "('skip_connect', 0), ('skip_connect', 2), ('max_pool_3x3', 1), ('skip_connect', 1), ('skip_connect', 2),", "('sep_conv_5x5', 2), ('sep_conv_3x3', 0), ('avg_pool_3x3', 3), ('sep_conv_3x3', 1), ('skip_connect', 1),", "reduce=[ ('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_7x7', 2), ('sep_conv_7x7',", "0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], 
reduce_concat=[2, 3, 4,", "'dil_conv_3x3', 'dil_conv_5x5' ] CRBPRIMITIVES = [ 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3',", "1), ('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2,", "'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] NASNet = Genotype( normal=[ ('sep_conv_5x5', 1),", "1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 0), ('sep_conv_3x3', 0), ('avg_pool_3x3', 1), ('skip_connect',", "('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)],", "('avg_pool_3x3', 2), ('sep_conv_3x3', 4), ('dil_conv_5x5', 1)], reduce_concat=range(2, 6)) BATH2 =", "2), ('sep_conv_3x3', 0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3',", "1), ], normal_concat=[2, 3, 4, 5, 6], reduce=[ ('sep_conv_5x5', 1),", "= Genotype( normal=[('max_pool_3x3', 1), ('skip_connect', 0), ('skip_connect', 2), ('max_pool_3x3', 1),", "('sep_conv_7x7', 0), ('max_pool_3x3', 1), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1), ('sep_conv_5x5', 0),", "('max_pool_3x3', 1), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1), ('sep_conv_5x5', 0), ('skip_connect', 3),", "0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect',", "'dil_conv_3x3', 'dil_conv_5x5' ] NASNet = Genotype( normal=[ ('sep_conv_5x5', 1), ('sep_conv_3x3',", "Genotype( normal=[ ('sep_conv_5x5', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 0), ('sep_conv_3x3', 0),", "('sep_conv_3x3', 0), ('sep_conv_5x5', 2), ('sep_conv_3x3', 0), ('avg_pool_3x3', 3), ('sep_conv_3x3', 1),", "Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat') PRIMITIVES = [", "NASNet = Genotype( normal=[ ('sep_conv_5x5', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 0),", "1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 2), ('sep_conv_3x3', 0), ('avg_pool_3x3', 3), ('sep_conv_3x3',", "1), ('max_pool_3x3', 0), ('sep_conv_7x7', 2), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1), ('max_pool_3x3',", "1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('conv_7x1_1x7', 0), ('sep_conv_3x3', 5), ],", "normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1),", "('avg_pool_3x3', 0), ('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('skip_connect', 1), ], normal_concat=[2,", "('skip_connect', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0)], normal_concat=range(2, 6),", "5, 6], reduce=[ ('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_7x7',", "('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2),", "from collections import namedtuple Genotype = namedtuple('Genotype', 'normal normal_concat reduce", "normal=[ ('sep_conv_5x5', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 0), ('sep_conv_3x3', 0), ('avg_pool_3x3',", "'dil_conv_5x5' ] CRBPRIMITIVES = [ 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5',", "4, 6] ) DARTS_V1 = Genotype( normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0),", "'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] CRBPRIMITIVES = [ 'max_pool_3x3', 'avg_pool_3x3',", "('max_pool_3x3', 0), ('max_pool_3x3', 1), ('conv_7x1_1x7', 0), ('sep_conv_3x3', 5), ], reduce_concat=[3,", "2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5]) DARTS_V2 = Genotype(", "1), ('conv_7x1_1x7', 0), ('sep_conv_3x3', 5), ], reduce_concat=[3, 4, 6] )", "('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 2), ('max_pool_3x3', 1), ], 
reduce_concat=[4,", "('max_pool_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_5x5', 2), ('dil_conv_5x5', 0), ('max_pool_3x3', 2),", "('sep_conv_3x3', 4), ('dil_conv_5x5', 1)], reduce_concat=range(2, 6)) BATH2 = Genotype( normal=[('max_pool_3x3',", "('skip_connect', 0), ('avg_pool_3x3', 1), ], normal_concat=[4, 5, 6], reduce=[ ('avg_pool_3x3',", "1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3',", "5]) DARTS_V2 = Genotype( normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0),", "6] ) DARTS_V1 = Genotype( normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect',", "2), ('max_pool_3x3', 1), ], reduce_concat=[4, 5, 6], ) AmoebaNet =", "reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0),", "2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5]) DARTS", "('max_pool_3x3', 0), ('sep_conv_7x7', 2), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1), ('max_pool_3x3', 0),", "2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5]) DARTS_V2", "('sep_conv_5x5', 2), ('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 4), ('dil_conv_5x5', 1)],", "2), ('sep_conv_3x3', 4), ('dil_conv_5x5', 1)], reduce_concat=range(2, 6)) BATH2 = Genotype(", "= [ 'none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5'", "('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3, 4, 5],", "0), ('skip_connect', 2), ('max_pool_3x3', 1), ('skip_connect', 1), ('skip_connect', 2), ('max_pool_3x3',", "('sep_conv_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_7x7', 2), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1),", "DARTS_V2 BATH = Genotype( normal=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('max_pool_3x3', 0),", "('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0),", "2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect',", "('max_pool_3x3', 0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 0),", "('avg_pool_3x3', 1), ], normal_concat=[4, 5, 6], reduce=[ ('avg_pool_3x3', 0), ('sep_conv_3x3',", "normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0),", "0), ('avg_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('conv_7x1_1x7', 0), ('sep_conv_3x3',", "normal_concat reduce reduce_concat') PRIMITIVES = [ 'none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect',", "] NASNet = Genotype( normal=[ ('sep_conv_5x5', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5',", "0), ('max_pool_3x3', 1), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1), ('sep_conv_5x5', 0), ('skip_connect',", "2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5]) DARTS = DARTS_V2", "('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5]) DARTS = DARTS_V2 BATH", "('sep_conv_3x3', 0), ('avg_pool_3x3', 3), ('sep_conv_3x3', 1), ('skip_connect', 1), ('skip_connect', 0),", "2), ('sep_conv_3x3', 0), ('avg_pool_3x3', 3), ('sep_conv_3x3', 1), ('skip_connect', 1), ('skip_connect',", "('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2),", "1), ('max_pool_3x3', 0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect',", "[ 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] NASNet", 
"('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0), ('skip_connect', 2),", "0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3',", "('sep_conv_3x3', 2), ('sep_conv_3x3', 0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0),", "6), reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_5x5', 2), ('skip_connect',", "1), ], reduce_concat=[4, 5, 6], ) AmoebaNet = Genotype( normal=[", "('max_pool_3x3', 1), ('max_pool_3x3', 0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1),", "reduce_concat=[2, 3, 4, 5]) DARTS_V2 = Genotype( normal=[('sep_conv_3x3', 0), ('sep_conv_3x3',", "reduce=[ ('sep_conv_5x5', 1), ('sep_conv_7x7', 0), ('max_pool_3x3', 1), ('sep_conv_7x7', 0), ('avg_pool_3x3',", "], reduce_concat=[3, 4, 6] ) DARTS_V1 = Genotype( normal=[('sep_conv_3x3', 1),", "6), reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 0), ('dil_conv_3x3', 1), ('skip_connect',", "reduce_concat=[4, 5, 6], ) AmoebaNet = Genotype( normal=[ ('avg_pool_3x3', 0),", "'normal normal_concat reduce reduce_concat') PRIMITIVES = [ 'none', 'max_pool_3x3', 'avg_pool_3x3',", "normal=[('max_pool_3x3', 1), ('skip_connect', 0), ('skip_connect', 2), ('max_pool_3x3', 1), ('skip_connect', 1),", "DARTS = DARTS_V2 BATH = Genotype( normal=[('max_pool_3x3', 0), ('max_pool_3x3', 1),", "('skip_connect', 1), ('skip_connect', 0), ('dil_conv_5x5', 0), ('sep_conv_3x3', 4)], reduce_concat=range(2, 6))", "BATH2 = Genotype( normal=[('max_pool_3x3', 1), ('skip_connect', 0), ('skip_connect', 2), ('max_pool_3x3',", "('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0),", "('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3,", "6], ) AmoebaNet = Genotype( normal=[ ('avg_pool_3x3', 0), ('max_pool_3x3', 1),", "('sep_conv_5x5', 2), ('dil_conv_5x5', 0), ('max_pool_3x3', 2), ('sep_conv_3x3', 2), ('sep_conv_3x3', 0)],", "2), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('conv_7x1_1x7',", "('skip_connect', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1),", "0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3',", "collections import namedtuple Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')", "('dil_conv_5x5', 0), ('max_pool_3x3', 2), ('sep_conv_3x3', 2), ('sep_conv_3x3', 0)], normal_concat=range(2, 6),", "namedtuple Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat') PRIMITIVES =", "reduce_concat=[3, 4, 6] ) DARTS_V1 = Genotype( normal=[('sep_conv_3x3', 1), ('sep_conv_3x3',", "DARTS_V1 = Genotype( normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3',", "4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1),", "('max_pool_3x3', 1), ('skip_connect', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0)],", "= Genotype( normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1),", "('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5],", "1)], reduce_concat=[2, 3, 4, 5]) DARTS = DARTS_V2 BATH =", "('avg_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 2), ('sep_conv_3x3', 0),", "('skip_connect', 0), 
('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0),", ") AmoebaNet = Genotype( normal=[ ('avg_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_3x3',", "= Genotype( normal=[ ('sep_conv_5x5', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 0), ('sep_conv_3x3',", "1), ('skip_connect', 0), ('dil_conv_3x3', 1), ('skip_connect', 1), ('skip_connect', 0), ('dil_conv_5x5',", "('conv_7x1_1x7', 0), ('sep_conv_3x3', 5), ], reduce_concat=[3, 4, 6] ) DARTS_V1", "normal_concat=[2, 3, 4, 5, 6], reduce=[ ('sep_conv_5x5', 1), ('sep_conv_7x7', 0),", "BATH = Genotype( normal=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_5x5',", "1), ('sep_conv_7x7', 0), ('max_pool_3x3', 1), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1), ('sep_conv_5x5',", "('sep_conv_5x5', 0), ('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 2), ('max_pool_3x3', 1),", "2), ('max_pool_3x3', 0), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3',", "1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2,", "('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3,", "5]) DARTS = DARTS_V2 BATH = Genotype( normal=[('max_pool_3x3', 0), ('max_pool_3x3',", "0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect',", "0), ('avg_pool_3x3', 3), ('sep_conv_3x3', 1), ('skip_connect', 1), ('skip_connect', 0), ('avg_pool_3x3',", "1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3',", "Genotype( normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3',", "'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] CRBPRIMITIVES = [ 'max_pool_3x3',", "0), ('max_pool_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 2), ('sep_conv_3x3', 0), ('avg_pool_3x3',", "0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 0), ('dil_conv_3x3',", "('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5])", "('sep_conv_7x7', 2), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1),", "0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3',", "CRBPRIMITIVES = [ 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5'", "PRIMITIVES = [ 'none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3',", "1)], reduce_concat=range(2, 6)) BATH2 = Genotype( normal=[('max_pool_3x3', 1), ('skip_connect', 0),", "('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('skip_connect', 1), ], normal_concat=[2, 3, 4,", "3, 4, 5, 6], reduce=[ ('sep_conv_5x5', 1), ('sep_conv_7x7', 0), ('max_pool_3x3',", "('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1),", "('sep_conv_3x3', 1), ('skip_connect', 1), ('skip_connect', 0), ('avg_pool_3x3', 1), ], normal_concat=[4,", "('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)],", "reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_5x5', 2), ('skip_connect', 3),", "2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3',", "1), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1), 
('sep_conv_5x5', 0), ('skip_connect', 3), ('avg_pool_3x3',", "3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 2), ('max_pool_3x3', 1), ], reduce_concat=[4, 5,", "1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_5x5', 2), ('skip_connect', 3), ('avg_pool_3x3',", "0), ('sep_conv_5x5', 2), ('dil_conv_5x5', 0), ('max_pool_3x3', 2), ('sep_conv_3x3', 2), ('sep_conv_3x3',", "1), ('skip_connect', 0), ('skip_connect', 2), ('max_pool_3x3', 1), ('skip_connect', 1), ('skip_connect',", "], normal_concat=[4, 5, 6], reduce=[ ('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('max_pool_3x3',", "1), ], normal_concat=[4, 5, 6], reduce=[ ('avg_pool_3x3', 0), ('sep_conv_3x3', 1),", "('max_pool_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 2), ('sep_conv_3x3', 0), ('avg_pool_3x3', 3),", "('skip_connect', 1), ], normal_concat=[2, 3, 4, 5, 6], reduce=[ ('sep_conv_5x5',", "3), ('sep_conv_3x3', 1), ('skip_connect', 1), ('skip_connect', 0), ('avg_pool_3x3', 1), ],", "0), ('sep_conv_3x3', 5), ], reduce_concat=[3, 4, 6] ) DARTS_V1 =", "0), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_5x5', 2), ('dil_conv_5x5', 0), ('max_pool_3x3',", "6)) BATH2 = Genotype( normal=[('max_pool_3x3', 1), ('skip_connect', 0), ('skip_connect', 2),", "1), ('skip_connect', 1), ], normal_concat=[2, 3, 4, 5, 6], reduce=[", "1), ('skip_connect', 1), ('skip_connect', 0), ('avg_pool_3x3', 1), ], normal_concat=[4, 5,", "<gh_stars>0 from collections import namedtuple Genotype = namedtuple('Genotype', 'normal normal_concat", "2), ('max_pool_3x3', 1), ('max_pool_3x3', 0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('max_pool_3x3',", "0)], reduce_concat=[2, 3, 4, 5]) DARTS_V2 = Genotype( normal=[('sep_conv_3x3', 0),", "0), ('avg_pool_3x3', 0), ('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('skip_connect', 1), ],", "Genotype( normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect',", "0), ('skip_connect', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3',", "Genotype( normal=[('max_pool_3x3', 1), ('skip_connect', 0), ('skip_connect', 2), ('max_pool_3x3', 1), ('skip_connect',", "'dil_conv_5x5' ] NASNet = Genotype( normal=[ ('sep_conv_5x5', 1), ('sep_conv_3x3', 0),", "0), ('avg_pool_3x3', 1), ('skip_connect', 0), ('avg_pool_3x3', 0), ('avg_pool_3x3', 0), ('sep_conv_3x3',", "6], reduce=[ ('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_7x7', 2),", "0), ('max_pool_3x3', 2), ('sep_conv_3x3', 2), ('sep_conv_3x3', 0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3',", "1), ('skip_connect', 0), ('avg_pool_3x3', 1), ], normal_concat=[4, 5, 6], reduce=[", "('max_pool_3x3', 1), ('conv_7x1_1x7', 0), ('sep_conv_3x3', 5), ], reduce_concat=[3, 4, 6]", "('sep_conv_3x3', 5), ], reduce_concat=[3, 4, 6] ) DARTS_V1 = Genotype(", "0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0), ('skip_connect'," ]
[ "with sub_bar.output_to(0): if SHOW_FULL_QUERY: print( color.Format( 'The following query is", "PROJECT = None # TODO: Should this be renamed to", "global SHOW_FULL_QUERY TABULATED_OUTPUT = tabulated_output SHOW_FULL_QUERY = TABULATED_OUTPUT if not", "def SetDbConnection(connection): global DB_CONNECTION DB_CONNECTION = connection def EnsureAuthenticatedUser(): global", "global DB_CONNECTION DB_CONNECTION = connection def EnsureAuthenticatedUser(): global USER_AUTHENTICATED global", "IPython from IPython.core.magic import register_cell_magic from IPython.display import display import", "support JS.\"\"\" if TABULATED_OUTPUT: return widgets.TabBar(*args) class MockTab: def __init__(self):", "ShowError(error_text): print(color.Format('[ {error}Error{end} ] ' + error_text)) def Logica(line, cell,", "2.0 (the \"License\"); # you may not use this file", "error_text)) def Logica(line, cell, run_query): \"\"\"Running Logica predicates and storing", "sqlite3_logica BQ_READY = True # By default. try: from google.cloud", "as e: print('Encountered error when compiling %s.' % predicate) e.ShowMessage()", "not line: predicates = [] else: predicates = [p.strip() for", "colab_logica.SetDbConnection(connection)\"\"\") return print('Installation succeeded. Connecting...') # Connect to the database.", "cell): Logica(line, cell, run_query=True) def ParseList(line): line = line.strip() if", "-qq install postgresql') result += os.system('sudo service postgresql start') #", "# TODO: Should this be renamed to PSQL_ENGINE, PSQL_CONNECTION? DB_ENGINE", "= self.engine DB_CONNECTION = self.connection def __call__(self, sql, engine, is_final):", "+ ['(Log)']) logs_idx = len(predicates) executions = [] sub_bars =", "in [PREAMBLE, cell] if s) parsed_rules = parse.ParseFile(program)['rule'] except parse.ParsingException", "print('The query was not run.') print(' ') # To activate", "print('Could not import google.colab.widgets.') PROJECT = None # TODO: Should", "under the License. \"\"\"Library for using Logica in CoLab.\"\"\" from", "USER logica PASSWORD \\'<PASSWORD>\\';\"') result += os.system( 'sudo -u postgres", "= create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600) connection = engine.connect() print('Connected.') return engine, connection", "'sudo -u postgres psql -c \"CREATE USER logica WITH SUPERUSER\"')", "-c \"CREATE USER logica WITH SUPERUSER\"') result += os.system( 'sudo", "__enter__(self): pass def __exit__(self, *x): pass class MockTabBar: def __init__(self):", "is stored at %s variable.' % color.Warn(predicate + '_sql')) with", "for UIs that don't support JS.\"\"\" if TABULATED_OUTPUT: return widgets.TabBar(*args)", "learning Logica.\\n' 'Use {warning}@Engine(\"sqlite\");{end} annotation in your program to use", "in line.split(',')] return predicates def RunSQL(sql, engine, connection=None, is_final=False): if", "logica;\\'') if result != 0: print(\"\"\"Installation failed. 
Please try the", "__init__(self): self.connection = sqlite3_logica.SqliteConnect() # TODO: Sqlite runner should not", "self.engine DB_CONNECTION = self.connection def __call__(self, sql, engine, is_final): return", "= IPython.get_ipython() for idx, predicate in enumerate(predicates): with bar.output_to(logs_idx): try:", "os.system('sudo apt-get -y -qq install postgresql') result += os.system('sudo service", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "= True PREAMBLE = None def SetPreamble(preamble): global PREAMBLE PREAMBLE", "update') result += os.system('sudo apt-get -y -qq install postgresql') result", "= None DB_CONNECTION = None USER_AUTHENTICATED = False TABULATED_OUTPUT =", "-c \"ALTER USER logica PASSWORD '<PASSWORD>';\" !sudo -u postgres psql", "print(\"Installing and configuring an empty PostgreSQL database.\") result = 0", "pandas engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600); connection = engine.connect(); colab_logica.SetDbConnection(connection)\"\"\") return", "cell, run_query): \"\"\"Running Logica predicates and storing results.\"\"\" predicates =", "= PostgresJumpStart() DB_ENGINE = self.engine DB_CONNECTION = self.connection def __call__(self,", "'<PASSWORD>';\" !sudo -u postgres psql -U postgres -c 'CREATE DATABASE", "ip = IPython.get_ipython() for idx, predicate in enumerate(predicates): with bar.output_to(logs_idx):", "an empty PostgreSQL database.\") result = 0 result += os.system('sudo", "run BigQuery requests from Google CoLab:\\n' ' https://colab.research.google.com/.\\n' 'Note that", "-u postgres psql -U postgres -c \\'CREATE DATABASE logica;\\'') if", "True PREAMBLE = None def SetPreamble(preamble): global PREAMBLE PREAMBLE =", "= parse.SplitRaw(sql, ';') connection.executescript(sql) if is_final: return pandas.read_sql(statements[-1], connection) else:", "line.strip() if not line: predicates = [] else: predicates =", "failed. Please try the following manually: # Install Logica. !pip", "' 'for now.') class SqliteRunner(object): def __init__(self): self.connection = sqlite3_logica.SqliteConnect()", "data or learning Logica.\\n' 'Use {warning}@Engine(\"sqlite\");{end} annotation in your program", "import color from .common import concertina_lib from .compiler import functors", "TABULATED_OUTPUT = tabulated_output SHOW_FULL_QUERY = TABULATED_OUTPUT if not WIDGETS_IMPORTED: SetTabulatedOutput(False)", "False TABULATED_OUTPUT = True SHOW_FULL_QUERY = True PREAMBLE = None", "e.ShowMessage() return # Publish output to Colab cell. with bar.output_to(idx):", "print(color.Format('[ {error}Error{end} ] ' + error_text)) def Logica(line, cell, run_query):", "bar.output_to(idx): with sub_bars[idx].output_to(1): if run_query: print( color.Format( 'The following table", "exist. result += 0 * os.system( 'sudo -u postgres psql", "def ParseList(line): line = line.strip() if not line: predicates =", "IPython.get_ipython() for idx, predicate in enumerate(predicates): with bar.output_to(logs_idx): try: sql", "executions, sql_runner=sql_runner, sql_engine=engine) for idx, predicate in enumerate(predicates): t =", "stored at %s variable.' % color.Warn(predicate + '_sql')) with bar.output_to(logs_idx):", "SetTabulatedOutput(False) def TabBar(*args): \"\"\"Returns a real TabBar or a mock.", "ParseList(line): line = line.strip() if not line: predicates = []", "use this file except in compliance with the License. 
#", "'It is the easiest to run BigQuery requests from Google", "import google.cloud.auth.') try: from google.colab import widgets WIDGETS_IMPORTED = True", "return RunSQL(sql, engine, self.connection, is_final) class PostgresRunner(object): def __init__(self): global", "== 'sqlite': sql_runner = SqliteRunner() elif engine == 'psql': sql_runner", "True def SetTabulatedOutput(tabulated_output): global TABULATED_OUTPUT global SHOW_FULL_QUERY TABULATED_OUTPUT = tabulated_output", "for p in line.split(',')] return predicates def RunSQL(sql, engine, connection=None,", "except: BQ_READY = False print('Could not import google.cloud.auth.') try: from", "if USER_AUTHENTICATED: return auth.authenticate_user() if PROJECT is None: print(\"Please enter", "'Use {warning}@Engine(\"sqlite\");{end} annotation in your program to use SQLite.') return", "= [] else: predicates = [p.strip() for p in line.split(',')]", "'sudo -u postgres psql -c \"ALTER USER logica PASSWORD \\'<PASSWORD>\\';\"')", "elif engine == 'bigquery': EnsureAuthenticatedUser() sql_runner = RunSQL else: raise", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "fit for working with small data or learning Logica.\\n' 'Use", "License. # You may obtain a copy of the License", "now.') class SqliteRunner(object): def __init__(self): self.connection = sqlite3_logica.SqliteConnect() # TODO:", "' https://colab.research.google.com/.\\n' 'Note that running Logica on SQLite requires no", "using Logica in CoLab.\"\"\" from .common import color from .common", "annotation in your program to use SQLite.') return bar =", "google.cloud.bigquery.') try: from google.colab import auth except: BQ_READY = False", "Google CoLab:\\n' ' https://colab.research.google.com/.\\n' 'Note that running Logica on SQLite", "\"\"\"Returns a real TabBar or a mock. Useful for UIs", "logica;' # Connect to the database. from logica import colab_logica", "x): return MockTab() return MockTabBar() @register_cell_magic def logica(line, cell): Logica(line,", "under the License is distributed on an \"AS IS\" BASIS,", "return MockTabBar() @register_cell_magic def logica(line, cell): Logica(line, cell, run_query=True) def", "predicates to run.') return try: program = ';\\n'.join(s for s", "License for the specific language governing permissions and # limitations", "from google.colab import auth except: BQ_READY = False print('Could not", "Logica in CoLab.\"\"\" from .common import color from .common import", "EnsureAuthenticatedUser() sql_runner = RunSQL else: raise Exception('Logica only supports BigQuery,", "parse.SplitRaw(sql, ';') connection.executescript(sql) if is_final: return pandas.read_sql(statements[-1], connection) else: pass", "now.') result_map = concertina_lib.ExecuteLogicaProgram( executions, sql_runner=sql_runner, sql_engine=engine) for idx, predicate", "default. try: from google.cloud import bigquery except: BQ_READY = False", "running Logica on SQLite requires no installation.\\n' 'This could be", "to run BigQuery requests from Google CoLab:\\n' ' https://colab.research.google.com/.\\n' 'Note", "ip.push({predicate + '_sql': sql}) except rule_translate.RuleCompileException as e: print('Encountered error", "at {warning}%s{end} ' 'variable.' % ( predicate + '_sql'))) print(sql)", "mock. 
Useful for UIs that don't support JS.\"\"\" if TABULATED_OUTPUT:", "statements = parse.SplitRaw(sql, ';') connection.executescript(sql) if is_final: return pandas.read_sql(statements[-1], connection)", "% ( predicate + '_sql'))) print(sql) else: print('Query is stored", "to Colab cell. with bar.output_to(idx): sub_bar = TabBar(['SQL', 'Result']) sub_bars.append(sub_bar)", "\\'CREATE DATABASE logica;\\'') if result != 0: print(\"\"\"Installation failed. Please", "install logica # Install postgresql server. !sudo apt-get -y -qq", "% color.Warn(predicate + '_sql')) with bar.output_to(logs_idx): if engine == 'sqlite':", "print(' ') # To activate the tabbar. def PostgresJumpStart(): #", "logica WITH SUPERUSER\"') result += os.system( 'sudo -u postgres psql", "else: print('Query is stored at %s variable.' % color.Warn(predicate +", "import google.cloud.bigquery.') try: from google.colab import auth except: BQ_READY =", "PostgreSQL and SQLite ' 'for now.') class SqliteRunner(object): def __init__(self):", "global PREAMBLE PREAMBLE = preamble def SetProject(project): global PROJECT PROJECT", "return connection.execute(sql) elif engine == 'sqlite': statements = parse.SplitRaw(sql, ';')", "apt-get -y -qq install postgresql !sudo service postgresql start #", "def SetTabulatedOutput(tabulated_output): global TABULATED_OUTPUT global SHOW_FULL_QUERY TABULATED_OUTPUT = tabulated_output SHOW_FULL_QUERY", "could be a good fit for working with small data", "{warning}@Engine(\"sqlite\");{end} annotation in your program to use SQLite.') return bar", "print( color.Format( 'The following query is stored at {warning}%s{end} '", "in compliance with the License. # You may obtain a", "RunSQL(sql, engine, connection=None, is_final=False): if engine == 'bigquery': client =", "TABULATED_OUTPUT global SHOW_FULL_QUERY TABULATED_OUTPUT = tabulated_output SHOW_FULL_QUERY = TABULATED_OUTPUT if", "= len(predicates) executions = [] sub_bars = [] ip =", "software # distributed under the License is distributed on an", "result = 0 result += os.system('sudo apt-get -y -qq update')", "on SQLite requires no installation.\\n' 'This could be a good", "# limitations under the License. \"\"\"Library for using Logica in", "not installed. \\n' 'It is the easiest to run BigQuery", "executions = [] sub_bars = [] ip = IPython.get_ipython() for", "predicate in enumerate(predicates): t = result_map[predicate] ip.push({predicate: t}) with bar.output_to(idx):", "functors.FunctorError as e: e.ShowMessage() return engine = program.annotations.Engine() if engine", "postgresql !sudo service postgresql start # Prepare database for Logica.", "server. !sudo apt-get -y -qq update !sudo apt-get -y -qq", "psql -c \"ALTER USER logica PASSWORD \\'<PASSWORD>\\';\"') result += os.system(", "for BigQuery queries.\") PROJECT = input() print(\"project_id is set to", "to run.') return try: program = ';\\n'.join(s for s in", "rule_translate from .compiler import universe import IPython from IPython.core.magic import", "sql_runner = PostgresRunner() elif engine == 'bigquery': EnsureAuthenticatedUser() sql_runner =", "to the database. from logica import colab_logica from sqlalchemy import", "-u postgres psql -U postgres -c 'CREATE DATABASE logica;' #", "supports BigQuery, PostgreSQL and SQLite ' 'for now.') result_map =", "if not predicates: ShowError('No predicates to run.') return try: program", "user creation error, as they may already exist. result +=", "database for Logica. 
!sudo -u postgres psql -c \"CREATE USER", "psql -U postgres -c 'CREATE DATABASE logica;' # Connect to", "for idx, predicate in enumerate(predicates): t = result_map[predicate] ip.push({predicate: t})", "'bigquery' and not BQ_READY: ShowError( 'BigQuery client and/or authentification is", "update !sudo apt-get -y -qq install postgresql !sudo service postgresql", "+= os.system('sudo apt-get -y -qq update') result += os.system('sudo apt-get", "bar.output_to(idx): sub_bar = TabBar(['SQL', 'Result']) sub_bars.append(sub_bar) with sub_bar.output_to(0): if SHOW_FULL_QUERY:", "enumerate(predicates): t = result_map[predicate] ip.push({predicate: t}) with bar.output_to(idx): with sub_bars[idx].output_to(1):", "sql_runner=sql_runner, sql_engine=engine) for idx, predicate in enumerate(predicates): t = result_map[predicate]", "run.') print(' ') # To activate the tabbar. def PostgresJumpStart():", "BigQuery, PostgreSQL and SQLite ' 'for now.') result_map = concertina_lib.ExecuteLogicaProgram(", "os.system('sudo service postgresql start') # Ignoring user creation error, as", "PREAMBLE = None def SetPreamble(preamble): global PREAMBLE PREAMBLE = preamble", "(self.engine, self.connection) = PostgresJumpStart() DB_ENGINE = self.engine DB_CONNECTION = self.connection", "# # Copyright 2020 Google LLC # # Licensed under", "return RunSQL(sql, engine, self.connection, is_final) def ShowError(error_text): print(color.Format('[ {error}Error{end} ]", "== 'psql': sql_runner = PostgresRunner() elif engine == 'bigquery': EnsureAuthenticatedUser()", "is None: print(\"Please enter project_id to use for BigQuery queries.\")", "to use SQLite.') return bar = TabBar(predicates + ['(Log)']) logs_idx", "None def SetPreamble(preamble): global PREAMBLE PREAMBLE = preamble def SetProject(project):", "Connect to the database. from logica import colab_logica from sqlalchemy", "output_to(self, x): return MockTab() return MockTabBar() @register_cell_magic def logica(line, cell):", "client and/or authentification is not installed. 
\\n' 'It is the", "None USER_AUTHENTICATED = False TABULATED_OUTPUT = True SHOW_FULL_QUERY = True", "pandas from .parser_py import parse from .common import sqlite3_logica BQ_READY", "client = bigquery.Client(project=PROJECT) return client.query(sql).to_dataframe() elif engine == 'psql': if", "PASSWORD '<PASSWORD>';\" !sudo -u postgres psql -U postgres -c 'CREATE", "'sqlite': statements = parse.SplitRaw(sql, ';') connection.executescript(sql) if is_final: return pandas.read_sql(statements[-1],", "connection def EnsureAuthenticatedUser(): global USER_AUTHENTICATED global PROJECT if USER_AUTHENTICATED: return", "as e: e.ShowMessage() return try: program = universe.LogicaProgram(parsed_rules) except functors.FunctorError", "empty PostgreSQL database.\") result = 0 result += os.system('sudo apt-get", "with logica.colab_logica.SetProject command.\") USER_AUTHENTICATED = True def SetTabulatedOutput(tabulated_output): global TABULATED_OUTPUT", "= True def SetTabulatedOutput(tabulated_output): global TABULATED_OUTPUT global SHOW_FULL_QUERY TABULATED_OUTPUT =", "Logica predicates and storing results.\"\"\" predicates = ParseList(line) if not", "predicates = [p.strip() for p in line.split(',')] return predicates def", "elif engine == 'sqlite': statements = parse.SplitRaw(sql, ';') connection.executescript(sql) if", "PROJECT if USER_AUTHENTICATED: return auth.authenticate_user() if PROJECT is None: print(\"Please", "+ '_sql': sql}) except rule_translate.RuleCompileException as e: print('Encountered error when", "google.colab import widgets WIDGETS_IMPORTED = True except: WIDGETS_IMPORTED = False", "with bar.output_to(idx): sub_bar = TabBar(['SQL', 'Result']) sub_bars.append(sub_bar) with sub_bar.output_to(0): if", "'The following table is stored at {warning}%s{end} ' 'variable.' %", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "accepting an engine. def __call__(self, sql, engine, is_final): return RunSQL(sql,", "connection) else: return connection.execute(sql) elif engine == 'sqlite': statements =", "if is_final: return pandas.read_sql(statements[-1], connection) else: pass return None else:", "with small data or learning Logica.\\n' 'Use {warning}@Engine(\"sqlite\");{end} annotation in", "Connecting...') # Connect to the database. from logica import colab_logica", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "permissions and # limitations under the License. \"\"\"Library for using", "input() print(\"project_id is set to %s\" % PROJECT) print(\"You can", "ShowError( 'BigQuery client and/or authentification is not installed. \\n' 'It", "to in writing, software # distributed under the License is", "DATABASE logica;' # Connect to the database. 
from logica import", "global PROJECT if USER_AUTHENTICATED: return auth.authenticate_user() if PROJECT is None:", "Logica.\\n' 'Use {warning}@Engine(\"sqlite\");{end} annotation in your program to use SQLite.')", "__call__(self, sql, engine, is_final): return RunSQL(sql, engine, self.connection, is_final) class", "# See the License for the specific language governing permissions", "try: from google.colab import widgets WIDGETS_IMPORTED = True except: WIDGETS_IMPORTED", "the easiest to run BigQuery requests from Google CoLab:\\n' '", "or agreed to in writing, software # distributed under the", "*x): pass class MockTabBar: def __init__(self): pass def output_to(self, x):", "required by applicable law or agreed to in writing, software", "e: print('Encountered error when compiling %s.' % predicate) e.ShowMessage() return", "return try: program = universe.LogicaProgram(parsed_rules) except functors.FunctorError as e: e.ShowMessage()", "if run_query: print( color.Format( 'The following table is stored at", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "client.query(sql).to_dataframe() elif engine == 'psql': if is_final: return pandas.read_sql(sql, connection)", "+= os.system( 'sudo -u postgres psql -c \"ALTER USER logica", "TabBar(['SQL', 'Result']) sub_bars.append(sub_bar) with sub_bar.output_to(0): if SHOW_FULL_QUERY: print( color.Format( 'The", "with the License. # You may obtain a copy of", "global DB_ENGINE if DB_CONNECTION: self.engine = DB_ENGINE self.connection = DB_CONNECTION", "-c \"ALTER USER logica PASSWORD \\'<PASSWORD>\\';\"') result += os.system( 'sudo", "True except: WIDGETS_IMPORTED = False print('Could not import google.colab.widgets.') PROJECT", "== 'bigquery': EnsureAuthenticatedUser() sql_runner = RunSQL else: raise Exception('Logica only", "PSQL_CONNECTION? DB_ENGINE = None DB_CONNECTION = None USER_AUTHENTICATED = False", "connection) else: pass return None else: raise Exception('Logica only supports", "print(\"project_id is set to %s\" % PROJECT) print(\"You can change", "def TabBar(*args): \"\"\"Returns a real TabBar or a mock. Useful", "PostgresJumpStart(): # Install postgresql server. print(\"Installing and configuring an empty", "as e: e.ShowMessage() return engine = program.annotations.Engine() if engine ==", "use for BigQuery queries.\") PROJECT = input() print(\"project_id is set", "PREAMBLE = preamble def SetProject(project): global PROJECT PROJECT = project", "postgres psql -U postgres -c \\'CREATE DATABASE logica;\\'') if result", "def __init__(self): pass def output_to(self, x): return MockTab() return MockTabBar()", "= 0 result += os.system('sudo apt-get -y -qq update') result", "set to %s\" % PROJECT) print(\"You can change it with", "LLC # # Licensed under the Apache License, Version 2.0", "len(predicates) executions = [] sub_bars = [] ip = IPython.get_ipython()", "sql}) except rule_translate.RuleCompileException as e: print('Encountered error when compiling %s.'", "self.connection = sqlite3_logica.SqliteConnect() # TODO: Sqlite runner should not be", "return bar = TabBar(predicates + ['(Log)']) logs_idx = len(predicates) executions", "{warning}%s{end} ' 'variable.' % predicate)) display(t) else: print('The query was", "compliance with the License. 
# You may obtain a copy", "engine, self.connection, is_final) class PostgresRunner(object): def __init__(self): global DB_CONNECTION global", "in enumerate(predicates): with bar.output_to(logs_idx): try: sql = program.FormattedPredicateSql(predicate) executions.append(program.execution) ip.push({predicate", "agreed to in writing, software # distributed under the License", "only supports BigQuery, PostgreSQL and SQLite ' 'for now.') result_map", "install postgresql') result += os.system('sudo service postgresql start') # Ignoring", "def __init__(self): pass def __enter__(self): pass def __exit__(self, *x): pass", "google.cloud.auth.') try: from google.colab import widgets WIDGETS_IMPORTED = True except:", "distributed under the License is distributed on an \"AS IS\"", "BigQuery, PostgreSQL and SQLite ' 'for now.') class SqliteRunner(object): def", "enumerate(predicates): with bar.output_to(logs_idx): try: sql = program.FormattedPredicateSql(predicate) executions.append(program.execution) ip.push({predicate +", "the tabbar. def PostgresJumpStart(): # Install postgresql server. print(\"Installing and", "postgres -c \\'CREATE DATABASE logica;\\'') if result != 0: print(\"\"\"Installation", "Please try the following manually: # Install Logica. !pip install", "your program to use SQLite.') return bar = TabBar(predicates +", "program to use SQLite.') return bar = TabBar(predicates + ['(Log)'])", "print('Query is stored at %s variable.' % color.Warn(predicate + '_sql'))", "+= os.system( 'sudo -u postgres psql -U postgres -c \\'CREATE", "return print('Installation succeeded. Connecting...') # Connect to the database. from", "pass def output_to(self, x): return MockTab() return MockTabBar() @register_cell_magic def", "= [] ip = IPython.get_ipython() for idx, predicate in enumerate(predicates):", "= TABULATED_OUTPUT if not WIDGETS_IMPORTED: SetTabulatedOutput(False) def TabBar(*args): \"\"\"Returns a", "__init__(self): global DB_CONNECTION global DB_ENGINE if DB_CONNECTION: self.engine = DB_ENGINE", "predicate + '_sql'))) print(sql) else: print('Query is stored at %s", "express or implied. # See the License for the specific", "connection=None, is_final=False): if engine == 'bigquery': client = bigquery.Client(project=PROJECT) return", "except in compliance with the License. # You may obtain", "if engine == 'sqlite': sql_runner = SqliteRunner() elif engine ==", "postgres psql -c \"ALTER USER logica PASSWORD '<PASSWORD>';\" !sudo -u", "None else: raise Exception('Logica only supports BigQuery, PostgreSQL and SQLite", "not BQ_READY: ShowError( 'BigQuery client and/or authentification is not installed.", "colab_logica from sqlalchemy import create_engine import pandas engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1',", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "from .common import concertina_lib from .compiler import functors from .compiler", "widgets WIDGETS_IMPORTED = True except: WIDGETS_IMPORTED = False print('Could not", "DB_CONNECTION else: (self.engine, self.connection) = PostgresJumpStart() DB_ENGINE = self.engine DB_CONNECTION", "predicates = ParseList(line) if not predicates: ShowError('No predicates to run.')", "not use this file except in compliance with the License.", "Logica on SQLite requires no installation.\\n' 'This could be a", "def PostgresJumpStart(): # Install postgresql server. 
print(\"Installing and configuring an", ".compiler import functors from .compiler import rule_translate from .compiler import", "with sub_bars[idx].output_to(1): if run_query: print( color.Format( 'The following table is", "class PostgresRunner(object): def __init__(self): global DB_CONNECTION global DB_ENGINE if DB_CONNECTION:", "else: print('The query was not run.') print(' ') # To", "'bigquery': client = bigquery.Client(project=PROJECT) return client.query(sql).to_dataframe() elif engine == 'psql':", "a good fit for working with small data or learning", "\"CREATE USER logica WITH SUPERUSER\" !sudo -u postgres psql -c", "writing, software # distributed under the License is distributed on", "%s variable.' % color.Warn(predicate + '_sql')) with bar.output_to(logs_idx): if engine", "PSQL_ENGINE, PSQL_CONNECTION? DB_ENGINE = None DB_CONNECTION = None USER_AUTHENTICATED =", "WIDGETS_IMPORTED = False print('Could not import google.colab.widgets.') PROJECT = None", "and not BQ_READY: ShowError( 'BigQuery client and/or authentification is not", "you may not use this file except in compliance with", "renamed to PSQL_ENGINE, PSQL_CONNECTION? DB_ENGINE = None DB_CONNECTION = None", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "postgresql start # Prepare database for Logica. !sudo -u postgres", "== 'psql': if is_final: return pandas.read_sql(sql, connection) else: return connection.execute(sql)", "sql_engine=engine) for idx, predicate in enumerate(predicates): t = result_map[predicate] ip.push({predicate:", "from .common import color from .common import concertina_lib from .compiler", "SetTabulatedOutput(tabulated_output): global TABULATED_OUTPUT global SHOW_FULL_QUERY TABULATED_OUTPUT = tabulated_output SHOW_FULL_QUERY =", "else: pass return None else: raise Exception('Logica only supports BigQuery,", "' 'variable.' % ( predicate + '_sql'))) print(sql) else: print('Query", "DB_ENGINE = self.engine DB_CONNECTION = self.connection def __call__(self, sql, engine,", "0 result += os.system('sudo apt-get -y -qq update') result +=", "elif engine == 'psql': if is_final: return pandas.read_sql(sql, connection) else:", "class MockTab: def __init__(self): pass def __enter__(self): pass def __exit__(self,", "tabulated_output SHOW_FULL_QUERY = TABULATED_OUTPUT if not WIDGETS_IMPORTED: SetTabulatedOutput(False) def TabBar(*args):", "is the easiest to run BigQuery requests from Google CoLab:\\n'", "the License. \"\"\"Library for using Logica in CoLab.\"\"\" from .common", "logica PASSWORD \\'<PASSWORD>\\';\"') result += os.system( 'sudo -u postgres psql", "-U postgres -c \\'CREATE DATABASE logica;\\'') if result != 0:", "postgres psql -c \"CREATE USER logica WITH SUPERUSER\"') result +=", "result += os.system( 'sudo -u postgres psql -c \"ALTER USER", "google.cloud import bigquery except: BQ_READY = False print('Could not import", "may already exist. result += 0 * os.system( 'sudo -u", "import colab_logica from sqlalchemy import create_engine import pandas engine =", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "import register_cell_magic from IPython.display import display import os import pandas", "it with logica.colab_logica.SetProject command.\") USER_AUTHENTICATED = True def SetTabulatedOutput(tabulated_output): global", "__init__(self): pass def __enter__(self): pass def __exit__(self, *x): pass class", "auth except: BQ_READY = False print('Could not import google.cloud.auth.') try:", "from Google CoLab:\\n' ' https://colab.research.google.com/.\\n' 'Note that running Logica on", "Exception('Logica only supports BigQuery, PostgreSQL and SQLite ' 'for now.')", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "-qq update !sudo apt-get -y -qq install postgresql !sudo service", "\"\"\"Library for using Logica in CoLab.\"\"\" from .common import color", "os import pandas from .parser_py import parse from .common import", ".compiler import universe import IPython from IPython.core.magic import register_cell_magic from", "use SQLite.') return bar = TabBar(predicates + ['(Log)']) logs_idx =", "Useful for UIs that don't support JS.\"\"\" if TABULATED_OUTPUT: return", "sub_bars = [] ip = IPython.get_ipython() for idx, predicate in", "\"ALTER USER logica PASSWORD \\'<PASSWORD>\\';\"') result += os.system( 'sudo -u", "to PSQL_ENGINE, PSQL_CONNECTION? DB_ENGINE = None DB_CONNECTION = None USER_AUTHENTICATED", "display import os import pandas from .parser_py import parse from", "Logica(line, cell, run_query): \"\"\"Running Logica predicates and storing results.\"\"\" predicates", "from .compiler import functors from .compiler import rule_translate from .compiler", "'sudo -u postgres psql -U postgres -c \\'CREATE DATABASE logica;\\'')", "be renamed to PSQL_ENGINE, PSQL_CONNECTION? DB_ENGINE = None DB_CONNECTION =", "real TabBar or a mock. Useful for UIs that don't", "the database. from logica import colab_logica from sqlalchemy import create_engine", "database.\") result = 0 result += os.system('sudo apt-get -y -qq", "color.Warn(predicate + '_sql')) with bar.output_to(logs_idx): if engine == 'sqlite': sql_runner", "global DB_CONNECTION global DB_ENGINE if DB_CONNECTION: self.engine = DB_ENGINE self.connection", "for s in [PREAMBLE, cell] if s) parsed_rules = parse.ParseFile(program)['rule']", "SqliteRunner(object): def __init__(self): self.connection = sqlite3_logica.SqliteConnect() # TODO: Sqlite runner", "postgresql') result += os.system('sudo service postgresql start') # Ignoring user", "apt-get -y -qq update !sudo apt-get -y -qq install postgresql", "t = result_map[predicate] ip.push({predicate: t}) with bar.output_to(idx): with sub_bars[idx].output_to(1): if", "or learning Logica.\\n' 'Use {warning}@Engine(\"sqlite\");{end} annotation in your program to", "with bar.output_to(logs_idx): try: sql = program.FormattedPredicateSql(predicate) executions.append(program.execution) ip.push({predicate + '_sql':", "return client.query(sql).to_dataframe() elif engine == 'psql': if is_final: return pandas.read_sql(sql,", "return auth.authenticate_user() if PROJECT is None: print(\"Please enter project_id to", "USER_AUTHENTICATED: return auth.authenticate_user() if PROJECT is None: print(\"Please enter project_id", "DB_CONNECTION = self.connection def __call__(self, sql, engine, is_final): return RunSQL(sql,", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "def __init__(self): self.connection = sqlite3_logica.SqliteConnect() # TODO: Sqlite runner should", "os.system('sudo apt-get -y -qq update') result += os.system('sudo apt-get -y", "pass def __exit__(self, *x): pass class MockTabBar: def __init__(self): pass", ".common import concertina_lib from .compiler import functors from .compiler import", "color.Format( 'The following query is stored at {warning}%s{end} ' 'variable.'", "error, as they may already exist. result += 0 *", "the License is distributed on an \"AS IS\" BASIS, #", "# Install Logica. !pip install logica # Install postgresql server.", "runner should not be accepting an engine. def __call__(self, sql,", "DB_CONNECTION global DB_ENGINE if DB_CONNECTION: self.engine = DB_ENGINE self.connection =", "= [p.strip() for p in line.split(',')] return predicates def RunSQL(sql,", "TabBar or a mock. Useful for UIs that don't support", "self.connection) = PostgresJumpStart() DB_ENGINE = self.engine DB_CONNECTION = self.connection def", "and SQLite ' 'for now.') result_map = concertina_lib.ExecuteLogicaProgram( executions, sql_runner=sql_runner,", "PostgreSQL database.\") result = 0 result += os.system('sudo apt-get -y", "psql -c \"CREATE USER logica WITH SUPERUSER\"') result += os.system(", "\"ALTER USER logica PASSWORD '<PASSWORD>';\" !sudo -u postgres psql -U", "import os import pandas from .parser_py import parse from .common", "By default. try: from google.cloud import bigquery except: BQ_READY =", "return try: program = ';\\n'.join(s for s in [PREAMBLE, cell]", "import concertina_lib from .compiler import functors from .compiler import rule_translate", "for working with small data or learning Logica.\\n' 'Use {warning}@Engine(\"sqlite\");{end}", "supports BigQuery, PostgreSQL and SQLite ' 'for now.') class SqliteRunner(object):", "at %s variable.' % color.Warn(predicate + '_sql')) with bar.output_to(logs_idx): if", "parse.ParseFile(program)['rule'] except parse.ParsingException as e: e.ShowMessage() return try: program =", "'sqlite': sql_runner = SqliteRunner() elif engine == 'psql': sql_runner =", "is_final) class PostgresRunner(object): def __init__(self): global DB_CONNECTION global DB_ENGINE if", "SQLite.') return bar = TabBar(predicates + ['(Log)']) logs_idx = len(predicates)", "register_cell_magic from IPython.display import display import os import pandas from", "pass def __enter__(self): pass def __exit__(self, *x): pass class MockTabBar:", "engine, connection=None, is_final=False): if engine == 'bigquery': client = bigquery.Client(project=PROJECT)", "CoLab:\\n' ' https://colab.research.google.com/.\\n' 'Note that running Logica on SQLite requires", "line.split(',')] return predicates def RunSQL(sql, engine, connection=None, is_final=False): if engine", "False print('Could not import google.colab.widgets.') PROJECT = None # TODO:", "a real TabBar or a mock. Useful for UIs that", "{error}Error{end} ] ' + error_text)) def Logica(line, cell, run_query): \"\"\"Running", "TABULATED_OUTPUT if not WIDGETS_IMPORTED: SetTabulatedOutput(False) def TabBar(*args): \"\"\"Returns a real", "not run.') print(' ') # To activate the tabbar. def", "from .parser_py import parse from .common import sqlite3_logica BQ_READY =", "else: predicates = [p.strip() for p in line.split(',')] return predicates", "pandas.read_sql(statements[-1], connection) else: pass return None else: raise Exception('Logica only", "!= 0: print(\"\"\"Installation failed. Please try the following manually: #", "the following manually: # Install Logica. 
!pip install logica #", "import create_engine import pandas engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600) connection =", "law or agreed to in writing, software # distributed under", "'This could be a good fit for working with small", "( predicate + '_sql'))) print(sql) else: print('Query is stored at", "except functors.FunctorError as e: e.ShowMessage() return engine = program.annotations.Engine() if", "= TabBar(['SQL', 'Result']) sub_bars.append(sub_bar) with sub_bar.output_to(0): if SHOW_FULL_QUERY: print( color.Format(", "+ '_sql')) with bar.output_to(logs_idx): if engine == 'sqlite': sql_runner =", "TODO: Sqlite runner should not be accepting an engine. def", "start') # Ignoring user creation error, as they may already", "if result != 0: print(\"\"\"Installation failed. Please try the following", "sql_runner = RunSQL else: raise Exception('Logica only supports BigQuery, PostgreSQL", "To activate the tabbar. def PostgresJumpStart(): # Install postgresql server.", "universe.LogicaProgram(parsed_rules) except functors.FunctorError as e: e.ShowMessage() return engine = program.annotations.Engine()", "SQLite ' 'for now.') class SqliteRunner(object): def __init__(self): self.connection =", "s in [PREAMBLE, cell] if s) parsed_rules = parse.ParseFile(program)['rule'] except", "= result_map[predicate] ip.push({predicate: t}) with bar.output_to(idx): with sub_bars[idx].output_to(1): if run_query:", "= None def SetPreamble(preamble): global PREAMBLE PREAMBLE = preamble def", "and storing results.\"\"\" predicates = ParseList(line) if not predicates: ShowError('No", "project_id to use for BigQuery queries.\") PROJECT = input() print(\"project_id", "= TabBar(predicates + ['(Log)']) logs_idx = len(predicates) executions = []", "sqlalchemy import create_engine import pandas engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600); connection", "= self.connection def __call__(self, sql, engine, is_final): return RunSQL(sql, engine,", "DB_CONNECTION = None USER_AUTHENTICATED = False TABULATED_OUTPUT = True SHOW_FULL_QUERY", "SHOW_FULL_QUERY = TABULATED_OUTPUT if not WIDGETS_IMPORTED: SetTabulatedOutput(False) def TabBar(*args): \"\"\"Returns", "postgres psql -U postgres -c 'CREATE DATABASE logica;' # Connect", "import universe import IPython from IPython.core.magic import register_cell_magic from IPython.display", "TABULATED_OUTPUT: return widgets.TabBar(*args) class MockTab: def __init__(self): pass def __enter__(self):", "requests from Google CoLab:\\n' ' https://colab.research.google.com/.\\n' 'Note that running Logica", "os.system( 'sudo -u postgres psql -U postgres -c \\'CREATE DATABASE", "def output_to(self, x): return MockTab() return MockTabBar() @register_cell_magic def logica(line,", "!sudo service postgresql start # Prepare database for Logica. 
!sudo", "self.connection, is_final) def ShowError(error_text): print(color.Format('[ {error}Error{end} ] ' + error_text))", "def ShowError(error_text): print(color.Format('[ {error}Error{end} ] ' + error_text)) def Logica(line,", "may obtain a copy of the License at # #", "os.system( 'sudo -u postgres psql -c \"CREATE USER logica WITH", "SUPERUSER\"') result += os.system( 'sudo -u postgres psql -c \"ALTER", "= None # TODO: Should this be renamed to PSQL_ENGINE,", "\"\"\"Running Logica predicates and storing results.\"\"\" predicates = ParseList(line) if", "[] ip = IPython.get_ipython() for idx, predicate in enumerate(predicates): with", "False print('Could not import google.cloud.auth.') try: from google.colab import widgets", "True SHOW_FULL_QUERY = True PREAMBLE = None def SetPreamble(preamble): global", "' + error_text)) def Logica(line, cell, run_query): \"\"\"Running Logica predicates", "postgres psql -c \"ALTER USER logica PASSWORD \\'<PASSWORD>\\';\"') result +=", "color.Format( 'The following table is stored at {warning}%s{end} ' 'variable.'", "+= os.system('sudo apt-get -y -qq install postgresql') result += os.system('sudo", "try: program = ';\\n'.join(s for s in [PREAMBLE, cell] if", "ShowError('No predicates to run.') return try: program = ';\\n'.join(s for", "if PROJECT is None: print(\"Please enter project_id to use for", "following manually: # Install Logica. !pip install logica # Install", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "WIDGETS_IMPORTED = True except: WIDGETS_IMPORTED = False print('Could not import", "] ' + error_text)) def Logica(line, cell, run_query): \"\"\"Running Logica", "BQ_READY = True # By default. try: from google.cloud import", "apt-get -y -qq install postgresql') result += os.system('sudo service postgresql", "UIs that don't support JS.\"\"\" if TABULATED_OUTPUT: return widgets.TabBar(*args) class", "print(\"Please enter project_id to use for BigQuery queries.\") PROJECT =", "print(\"You can change it with logica.colab_logica.SetProject command.\") USER_AUTHENTICATED = True", "WIDGETS_IMPORTED: SetTabulatedOutput(False) def TabBar(*args): \"\"\"Returns a real TabBar or a", "may not use this file except in compliance with the", "line: predicates = [] else: predicates = [p.strip() for p", "Logica(line, cell, run_query=True) def ParseList(line): line = line.strip() if not", "SHOW_FULL_QUERY: print( color.Format( 'The following query is stored at {warning}%s{end}", "'BigQuery client and/or authentification is not installed. \\n' 'It is", "# Ignoring user creation error, as they may already exist.", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "[] sub_bars = [] ip = IPython.get_ipython() for idx, predicate", "print('Could not import google.cloud.bigquery.') try: from google.colab import auth except:", "in your program to use SQLite.') return bar = TabBar(predicates", "this file except in compliance with the License. # You", "def __call__(self, sql, engine, is_final): return RunSQL(sql, engine, self.connection, is_final)", "following table is stored at {warning}%s{end} ' 'variable.' % predicate))", "governing permissions and # limitations under the License. \"\"\"Library for", "else: return connection.execute(sql) elif engine == 'sqlite': statements = parse.SplitRaw(sql,", "print( color.Format( 'The following table is stored at {warning}%s{end} '", ".common import sqlite3_logica BQ_READY = True # By default. 
try:", "= sqlite3_logica.SqliteConnect() # TODO: Sqlite runner should not be accepting", "they may already exist. result += 0 * os.system( 'sudo", "\\'<PASSWORD>\\';\"') result += os.system( 'sudo -u postgres psql -U postgres", "import create_engine import pandas engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600); connection =", "from .common import sqlite3_logica BQ_READY = True # By default.", "__exit__(self, *x): pass class MockTabBar: def __init__(self): pass def output_to(self,", "!pip install logica # Install postgresql server. !sudo apt-get -y", "CoLab.\"\"\" from .common import color from .common import concertina_lib from", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "engine == 'psql': sql_runner = PostgresRunner() elif engine == 'bigquery':", "from google.cloud import bigquery except: BQ_READY = False print('Could not", "# # Licensed under the Apache License, Version 2.0 (the", "command.\") USER_AUTHENTICATED = True def SetTabulatedOutput(tabulated_output): global TABULATED_OUTPUT global SHOW_FULL_QUERY", "not import google.colab.widgets.') PROJECT = None # TODO: Should this", "idx, predicate in enumerate(predicates): t = result_map[predicate] ip.push({predicate: t}) with", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "try: from google.cloud import bigquery except: BQ_READY = False print('Could", "with bar.output_to(logs_idx): if engine == 'sqlite': sql_runner = SqliteRunner() elif", "'bigquery': EnsureAuthenticatedUser() sql_runner = RunSQL else: raise Exception('Logica only supports", "engine == 'bigquery': client = bigquery.Client(project=PROJECT) return client.query(sql).to_dataframe() elif engine", "Install postgresql server. print(\"Installing and configuring an empty PostgreSQL database.\")", "= project def SetDbConnection(connection): global DB_CONNECTION DB_CONNECTION = connection def", "be a good fit for working with small data or", "import pandas engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600) connection = engine.connect() print('Connected.')", "line = line.strip() if not line: predicates = [] else:", "is_final): return RunSQL(sql, engine, self.connection, is_final) class PostgresRunner(object): def __init__(self):", "= True # By default. try: from google.cloud import bigquery", "IPython.display import display import os import pandas from .parser_py import", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "PROJECT = project def SetDbConnection(connection): global DB_CONNECTION DB_CONNECTION = connection", "Copyright 2020 Google LLC # # Licensed under the Apache", "import google.colab.widgets.') PROJECT = None # TODO: Should this be", "program.FormattedPredicateSql(predicate) executions.append(program.execution) ip.push({predicate + '_sql': sql}) except rule_translate.RuleCompileException as e:", "PROJECT) print(\"You can change it with logica.colab_logica.SetProject command.\") USER_AUTHENTICATED =", "logica(line, cell): Logica(line, cell, run_query=True) def ParseList(line): line = line.strip()", "'for now.') class SqliteRunner(object): def __init__(self): self.connection = sqlite3_logica.SqliteConnect() #", "result += os.system('sudo apt-get -y -qq update') result += os.system('sudo", "pool_recycle=3600); connection = engine.connect(); colab_logica.SetDbConnection(connection)\"\"\") return print('Installation succeeded. 
Connecting...') #", "DB_CONNECTION: self.engine = DB_ENGINE self.connection = DB_CONNECTION else: (self.engine, self.connection)", "engine == 'bigquery' and not BQ_READY: ShowError( 'BigQuery client and/or", "= False TABULATED_OUTPUT = True SHOW_FULL_QUERY = True PREAMBLE =", "# Install postgresql server. print(\"Installing and configuring an empty PostgreSQL", "= ParseList(line) if not predicates: ShowError('No predicates to run.') return", "USER logica PASSWORD '<PASSWORD>';\" !sudo -u postgres psql -U postgres", "None # TODO: Should this be renamed to PSQL_ENGINE, PSQL_CONNECTION?", "elif engine == 'psql': sql_runner = PostgresRunner() elif engine ==", "database. from logica import colab_logica from sqlalchemy import create_engine import", "engine.connect(); colab_logica.SetDbConnection(connection)\"\"\") return print('Installation succeeded. Connecting...') # Connect to the", "Should this be renamed to PSQL_ENGINE, PSQL_CONNECTION? DB_ENGINE = None", "' 'variable.' % predicate)) display(t) else: print('The query was not", "pandas.read_sql(sql, connection) else: return connection.execute(sql) elif engine == 'sqlite': statements", "global USER_AUTHENTICATED global PROJECT if USER_AUTHENTICATED: return auth.authenticate_user() if PROJECT", "ParseList(line) if not predicates: ShowError('No predicates to run.') return try:", "EnsureAuthenticatedUser(): global USER_AUTHENTICATED global PROJECT if USER_AUTHENTICATED: return auth.authenticate_user() if", "if engine == 'bigquery' and not BQ_READY: ShowError( 'BigQuery client", "'_sql')) with bar.output_to(logs_idx): if engine == 'sqlite': sql_runner = SqliteRunner()", "DB_ENGINE self.connection = DB_CONNECTION else: (self.engine, self.connection) = PostgresJumpStart() DB_ENGINE", "from IPython.core.magic import register_cell_magic from IPython.display import display import os", "with bar.output_to(idx): with sub_bars[idx].output_to(1): if run_query: print( color.Format( 'The following", "= parse.ParseFile(program)['rule'] except parse.ParsingException as e: e.ShowMessage() return try: program", "import pandas from .parser_py import parse from .common import sqlite3_logica", "return pandas.read_sql(sql, connection) else: return connection.execute(sql) elif engine == 'sqlite':", "is stored at {warning}%s{end} ' 'variable.' % predicate)) display(t) else:", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "SHOW_FULL_QUERY TABULATED_OUTPUT = tabulated_output SHOW_FULL_QUERY = TABULATED_OUTPUT if not WIDGETS_IMPORTED:", "result_map = concertina_lib.ExecuteLogicaProgram( executions, sql_runner=sql_runner, sql_engine=engine) for idx, predicate in", "BQ_READY = False print('Could not import google.cloud.bigquery.') try: from google.colab", "import pandas engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600); connection = engine.connect(); colab_logica.SetDbConnection(connection)\"\"\")", "to use for BigQuery queries.\") PROJECT = input() print(\"project_id is", "not predicates: ShowError('No predicates to run.') return try: program =", "BigQuery requests from Google CoLab:\\n' ' https://colab.research.google.com/.\\n' 'Note that running", "# Publish output to Colab cell. with bar.output_to(idx): sub_bar =", "result += 0 * os.system( 'sudo -u postgres psql -c", "this be renamed to PSQL_ENGINE, PSQL_CONNECTION? DB_ENGINE = None DB_CONNECTION", "to %s\" % PROJECT) print(\"You can change it with logica.colab_logica.SetProject", "Colab cell. 
with bar.output_to(idx): sub_bar = TabBar(['SQL', 'Result']) sub_bars.append(sub_bar) with", "no installation.\\n' 'This could be a good fit for working", "that running Logica on SQLite requires no installation.\\n' 'This could", "= create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600); connection = engine.connect(); colab_logica.SetDbConnection(connection)\"\"\") return print('Installation succeeded.", "or implied. # See the License for the specific language", "connection.execute(sql) elif engine == 'sqlite': statements = parse.SplitRaw(sql, ';') connection.executescript(sql)", "SetDbConnection(connection): global DB_CONNECTION DB_CONNECTION = connection def EnsureAuthenticatedUser(): global USER_AUTHENTICATED", "as they may already exist. result += 0 * os.system(", "results.\"\"\" predicates = ParseList(line) if not predicates: ShowError('No predicates to", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "raise Exception('Logica only supports BigQuery, PostgreSQL and SQLite ' 'for", "an engine. def __call__(self, sql, engine, is_final): return RunSQL(sql, engine,", "PREAMBLE PREAMBLE = preamble def SetProject(project): global PROJECT PROJECT =", "engine, is_final): return RunSQL(sql, engine, self.connection, is_final) class PostgresRunner(object): def", "= line.strip() if not line: predicates = [] else: predicates", "if is_final: return pandas.read_sql(sql, connection) else: return connection.execute(sql) elif engine", "bar = TabBar(predicates + ['(Log)']) logs_idx = len(predicates) executions =", "None DB_CONNECTION = None USER_AUTHENTICATED = False TABULATED_OUTPUT = True", "start # Prepare database for Logica. !sudo -u postgres psql", "concertina_lib.ExecuteLogicaProgram( executions, sql_runner=sql_runner, sql_engine=engine) for idx, predicate in enumerate(predicates): t", "psql -c \"ALTER USER logica PASSWORD '<PASSWORD>';\" !sudo -u postgres", "logica import colab_logica from sqlalchemy import create_engine import pandas engine", "except: BQ_READY = False print('Could not import google.cloud.bigquery.') try: from", "output to Colab cell. with bar.output_to(idx): sub_bar = TabBar(['SQL', 'Result'])", "Publish output to Colab cell. with bar.output_to(idx): sub_bar = TabBar(['SQL',", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "ip.push({predicate: t}) with bar.output_to(idx): with sub_bars[idx].output_to(1): if run_query: print( color.Format(", "PostgreSQL and SQLite ' 'for now.') result_map = concertina_lib.ExecuteLogicaProgram( executions,", "os.system( 'sudo -u postgres psql -c \"ALTER USER logica PASSWORD", "RunSQL else: raise Exception('Logica only supports BigQuery, PostgreSQL and SQLite", "sqlite3_logica.SqliteConnect() # TODO: Sqlite runner should not be accepting an", "manually: # Install Logica. !pip install logica # Install postgresql", "= program.annotations.Engine() if engine == 'bigquery' and not BQ_READY: ShowError(", "# Connect to the database. from logica import colab_logica from", "language governing permissions and # limitations under the License. \"\"\"Library", "= False print('Could not import google.cloud.auth.') try: from google.colab import", "!sudo -u postgres psql -U postgres -c 'CREATE DATABASE logica;'", "is_final: return pandas.read_sql(sql, connection) else: return connection.execute(sql) elif engine ==", "install postgresql !sudo service postgresql start # Prepare database for", "engine. 
def __call__(self, sql, engine, is_final): return RunSQL(sql, engine, self.connection,", "preamble def SetProject(project): global PROJECT PROJECT = project def SetDbConnection(connection):", "return MockTab() return MockTabBar() @register_cell_magic def logica(line, cell): Logica(line, cell,", "[PREAMBLE, cell] if s) parsed_rules = parse.ParseFile(program)['rule'] except parse.ParsingException as", "predicate)) display(t) else: print('The query was not run.') print(' ')", "+ error_text)) def Logica(line, cell, run_query): \"\"\"Running Logica predicates and", "-c 'CREATE DATABASE logica;' # Connect to the database. from", "(the \"License\"); # you may not use this file except", "[] else: predicates = [p.strip() for p in line.split(',')] return", "BQ_READY: ShowError( 'BigQuery client and/or authentification is not installed. \\n'", "USER_AUTHENTICATED global PROJECT if USER_AUTHENTICATED: return auth.authenticate_user() if PROJECT is", "auth.authenticate_user() if PROJECT is None: print(\"Please enter project_id to use", "# you may not use this file except in compliance", "sql, engine, is_final): return RunSQL(sql, engine, self.connection, is_final) class PostgresRunner(object):", "+= 0 * os.system( 'sudo -u postgres psql -c \"CREATE", "PostgresRunner(object): def __init__(self): global DB_CONNECTION global DB_ENGINE if DB_CONNECTION: self.engine", "is not installed. \\n' 'It is the easiest to run", "except parse.ParsingException as e: e.ShowMessage() return try: program = universe.LogicaProgram(parsed_rules)", "sql_runner = SqliteRunner() elif engine == 'psql': sql_runner = PostgresRunner()", "predicates = [] else: predicates = [p.strip() for p in", "DB_CONNECTION = connection def EnsureAuthenticatedUser(): global USER_AUTHENTICATED global PROJECT if", "def __enter__(self): pass def __exit__(self, *x): pass class MockTabBar: def", "parse.ParsingException as e: e.ShowMessage() return try: program = universe.LogicaProgram(parsed_rules) except", "'variable.' % ( predicate + '_sql'))) print(sql) else: print('Query is", "#!/usr/bin/python # # Copyright 2020 Google LLC # # Licensed", "True # By default. try: from google.cloud import bigquery except:", "run_query=True) def ParseList(line): line = line.strip() if not line: predicates", "engine = program.annotations.Engine() if engine == 'bigquery' and not BQ_READY:", "# Install postgresql server. !sudo apt-get -y -qq update !sudo", "program.annotations.Engine() if engine == 'bigquery' and not BQ_READY: ShowError( 'BigQuery", "when compiling %s.' % predicate) e.ShowMessage() return # Publish output", "%s.' 
% predicate) e.ShowMessage() return # Publish output to Colab", "-u postgres psql -c \"ALTER USER logica PASSWORD \\'<PASSWORD>\\';\"') result", "google.colab import auth except: BQ_READY = False print('Could not import", "0 * os.system( 'sudo -u postgres psql -c \"CREATE USER", "-y -qq update !sudo apt-get -y -qq install postgresql !sudo", "USER logica WITH SUPERUSER\"') result += os.system( 'sudo -u postgres", "# # Unless required by applicable law or agreed to", "-y -qq update') result += os.system('sudo apt-get -y -qq install", "in CoLab.\"\"\" from .common import color from .common import concertina_lib", "return pandas.read_sql(statements[-1], connection) else: pass return None else: raise Exception('Logica", "'psql': if is_final: return pandas.read_sql(sql, connection) else: return connection.execute(sql) elif", "https://colab.research.google.com/.\\n' 'Note that running Logica on SQLite requires no installation.\\n'", "sub_bar = TabBar(['SQL', 'Result']) sub_bars.append(sub_bar) with sub_bar.output_to(0): if SHOW_FULL_QUERY: print(", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "PROJECT is None: print(\"Please enter project_id to use for BigQuery", "import parse from .common import sqlite3_logica BQ_READY = True #", "color from .common import concertina_lib from .compiler import functors from", "Version 2.0 (the \"License\"); # you may not use this", "good fit for working with small data or learning Logica.\\n'", "-c \\'CREATE DATABASE logica;\\'') if result != 0: print(\"\"\"Installation failed.", "program = ';\\n'.join(s for s in [PREAMBLE, cell] if s)", "pass return None else: raise Exception('Logica only supports BigQuery, PostgreSQL", "s) parsed_rules = parse.ParseFile(program)['rule'] except parse.ParsingException as e: e.ShowMessage() return", "e.ShowMessage() return try: program = universe.LogicaProgram(parsed_rules) except functors.FunctorError as e:", "= DB_ENGINE self.connection = DB_CONNECTION else: (self.engine, self.connection) = PostgresJumpStart()", "License. \"\"\"Library for using Logica in CoLab.\"\"\" from .common import", "print('Could not import google.cloud.auth.') try: from google.colab import widgets WIDGETS_IMPORTED", "not WIDGETS_IMPORTED: SetTabulatedOutput(False) def TabBar(*args): \"\"\"Returns a real TabBar or", "postgresql start') # Ignoring user creation error, as they may", "t}) with bar.output_to(idx): with sub_bars[idx].output_to(1): if run_query: print( color.Format( 'The", "for Logica. !sudo -u postgres psql -c \"CREATE USER logica", "IPython.core.magic import register_cell_magic from IPython.display import display import os import", "result += os.system('sudo service postgresql start') # Ignoring user creation", "rule_translate.RuleCompileException as e: print('Encountered error when compiling %s.' % predicate)", "= PostgresRunner() elif engine == 'bigquery': EnsureAuthenticatedUser() sql_runner = RunSQL", "\"CREATE USER logica WITH SUPERUSER\"') result += os.system( 'sudo -u", "logica PASSWORD '<PASSWORD>';\" !sudo -u postgres psql -U postgres -c", "implied. # See the License for the specific language governing", "tabbar. def PostgresJumpStart(): # Install postgresql server. print(\"Installing and configuring", "under the Apache License, Version 2.0 (the \"License\"); # you", "and/or authentification is not installed. 
\\n' 'It is the easiest", "= DB_CONNECTION else: (self.engine, self.connection) = PostgresJumpStart() DB_ENGINE = self.engine", "% predicate) e.ShowMessage() return # Publish output to Colab cell.", "__init__(self): pass def output_to(self, x): return MockTab() return MockTabBar() @register_cell_magic", "engine == 'bigquery': EnsureAuthenticatedUser() sql_runner = RunSQL else: raise Exception('Logica", "== 'sqlite': statements = parse.SplitRaw(sql, ';') connection.executescript(sql) if is_final: return", "PostgresJumpStart() DB_ENGINE = self.engine DB_CONNECTION = self.connection def __call__(self, sql,", "return engine = program.annotations.Engine() if engine == 'bigquery' and not", "def RunSQL(sql, engine, connection=None, is_final=False): if engine == 'bigquery': client", "else: (self.engine, self.connection) = PostgresJumpStart() DB_ENGINE = self.engine DB_CONNECTION =", "PASSWORD \\'<PASSWORD>\\';\"') result += os.system( 'sudo -u postgres psql -U", "USER_AUTHENTICATED = True def SetTabulatedOutput(tabulated_output): global TABULATED_OUTPUT global SHOW_FULL_QUERY TABULATED_OUTPUT", "= preamble def SetProject(project): global PROJECT PROJECT = project def", "apt-get -y -qq update') result += os.system('sudo apt-get -y -qq", "already exist. result += 0 * os.system( 'sudo -u postgres", "') # To activate the tabbar. def PostgresJumpStart(): # Install", "# Prepare database for Logica. !sudo -u postgres psql -c", "by applicable law or agreed to in writing, software #", "[p.strip() for p in line.split(',')] return predicates def RunSQL(sql, engine,", "psql -U postgres -c \\'CREATE DATABASE logica;\\'') if result !=", "from sqlalchemy import create_engine import pandas engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600);", "configuring an empty PostgreSQL database.\") result = 0 result +=", "predicates and storing results.\"\"\" predicates = ParseList(line) if not predicates:", "postgresql server. print(\"Installing and configuring an empty PostgreSQL database.\") result", "for using Logica in CoLab.\"\"\" from .common import color from", "is_final=False): if engine == 'bigquery': client = bigquery.Client(project=PROJECT) return client.query(sql).to_dataframe()", "DB_ENGINE = None DB_CONNECTION = None USER_AUTHENTICATED = False TABULATED_OUTPUT", "authentification is not installed. \\n' 'It is the easiest to", "engine == 'psql': if is_final: return pandas.read_sql(sql, connection) else: return", "is_final): return RunSQL(sql, engine, self.connection, is_final) def ShowError(error_text): print(color.Format('[ {error}Error{end}", "logica.colab_logica.SetProject command.\") USER_AUTHENTICATED = True def SetTabulatedOutput(tabulated_output): global TABULATED_OUTPUT global", "if SHOW_FULL_QUERY: print( color.Format( 'The following query is stored at", "JS.\"\"\" if TABULATED_OUTPUT: return widgets.TabBar(*args) class MockTab: def __init__(self): pass", "run.') return try: program = ';\\n'.join(s for s in [PREAMBLE,", "not import google.cloud.auth.') try: from google.colab import widgets WIDGETS_IMPORTED =", "display(t) else: print('The query was not run.') print(' ') #", "return None else: raise Exception('Logica only supports BigQuery, PostgreSQL and", "__call__(self, sql, engine, is_final): return RunSQL(sql, engine, self.connection, is_final) def", "stored at {warning}%s{end} ' 'variable.' 
% predicate)) display(t) else: print('The", "SUPERUSER\" !sudo -u postgres psql -c \"ALTER USER logica PASSWORD", "cell] if s) parsed_rules = parse.ParseFile(program)['rule'] except parse.ParsingException as e:", "connection.executescript(sql) if is_final: return pandas.read_sql(statements[-1], connection) else: pass return None", "else: raise Exception('Logica only supports BigQuery, PostgreSQL and SQLite '", "try the following manually: # Install Logica. !pip install logica", "import auth except: BQ_READY = False print('Could not import google.cloud.auth.')", "limitations under the License. \"\"\"Library for using Logica in CoLab.\"\"\"", "SQLite requires no installation.\\n' 'This could be a good fit", "# Copyright 2020 Google LLC # # Licensed under the", "google.colab.widgets.') PROJECT = None # TODO: Should this be renamed", "@register_cell_magic def logica(line, cell): Logica(line, cell, run_query=True) def ParseList(line): line", "Install postgresql server. !sudo apt-get -y -qq update !sudo apt-get", "cell. with bar.output_to(idx): sub_bar = TabBar(['SQL', 'Result']) sub_bars.append(sub_bar) with sub_bar.output_to(0):", "self.engine = DB_ENGINE self.connection = DB_CONNECTION else: (self.engine, self.connection) =", "sqlalchemy import create_engine import pandas engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600) connection", "from sqlalchemy import create_engine import pandas engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600)", "predicates def RunSQL(sql, engine, connection=None, is_final=False): if engine == 'bigquery':", "= False print('Could not import google.cloud.bigquery.') try: from google.colab import", "False print('Could not import google.cloud.bigquery.') try: from google.colab import auth", "return predicates def RunSQL(sql, engine, connection=None, is_final=False): if engine ==", "easiest to run BigQuery requests from Google CoLab:\\n' ' https://colab.research.google.com/.\\n'", "%s\" % PROJECT) print(\"You can change it with logica.colab_logica.SetProject command.\")", "-qq install postgresql !sudo service postgresql start # Prepare database", "change it with logica.colab_logica.SetProject command.\") USER_AUTHENTICATED = True def SetTabulatedOutput(tabulated_output):", "e: e.ShowMessage() return engine = program.annotations.Engine() if engine == 'bigquery'", "'Note that running Logica on SQLite requires no installation.\\n' 'This", "create_engine import pandas engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600); connection = engine.connect();", "predicates: ShowError('No predicates to run.') return try: program = ';\\n'.join(s", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", ".common import color from .common import concertina_lib from .compiler import", "Unless required by applicable law or agreed to in writing,", "e.ShowMessage() return engine = program.annotations.Engine() if engine == 'bigquery' and", "bigquery except: BQ_READY = False print('Could not import google.cloud.bigquery.') try:", "is set to %s\" % PROJECT) print(\"You can change it", "# To activate the tabbar. def PostgresJumpStart(): # Install postgresql", "variable.' % color.Warn(predicate + '_sql')) with bar.output_to(logs_idx): if engine ==", "'variable.' 
% predicate)) display(t) else: print('The query was not run.')", "!sudo -u postgres psql -c \"ALTER USER logica PASSWORD '<PASSWORD>';\"", "== 'bigquery': client = bigquery.Client(project=PROJECT) return client.query(sql).to_dataframe() elif engine ==", "';') connection.executescript(sql) if is_final: return pandas.read_sql(statements[-1], connection) else: pass return", "at {warning}%s{end} ' 'variable.' % predicate)) display(t) else: print('The query", "Sqlite runner should not be accepting an engine. def __call__(self,", "enter project_id to use for BigQuery queries.\") PROJECT = input()", "can change it with logica.colab_logica.SetProject command.\") USER_AUTHENTICATED = True def", "logica # Install postgresql server. !sudo apt-get -y -qq update", "the specific language governing permissions and # limitations under the", "if not WIDGETS_IMPORTED: SetTabulatedOutput(False) def TabBar(*args): \"\"\"Returns a real TabBar", "BigQuery queries.\") PROJECT = input() print(\"project_id is set to %s\"", "MockTabBar: def __init__(self): pass def output_to(self, x): return MockTab() return", "from google.colab import widgets WIDGETS_IMPORTED = True except: WIDGETS_IMPORTED =", "MockTabBar() @register_cell_magic def logica(line, cell): Logica(line, cell, run_query=True) def ParseList(line):", "try: program = universe.LogicaProgram(parsed_rules) except functors.FunctorError as e: e.ShowMessage() return", "applicable law or agreed to in writing, software # distributed", "DATABASE logica;\\'') if result != 0: print(\"\"\"Installation failed. Please try", "pandas engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600) connection = engine.connect() print('Connected.') return", "!sudo -u postgres psql -c \"CREATE USER logica WITH SUPERUSER\"", "import display import os import pandas from .parser_py import parse", "if TABULATED_OUTPUT: return widgets.TabBar(*args) class MockTab: def __init__(self): pass def", "= universe.LogicaProgram(parsed_rules) except functors.FunctorError as e: e.ShowMessage() return engine =", "-qq update') result += os.system('sudo apt-get -y -qq install postgresql')", "DB_ENGINE if DB_CONNECTION: self.engine = DB_ENGINE self.connection = DB_CONNECTION else:", "# By default. try: from google.cloud import bigquery except: BQ_READY", "import widgets WIDGETS_IMPORTED = True except: WIDGETS_IMPORTED = False print('Could", "functors from .compiler import rule_translate from .compiler import universe import", "RunSQL(sql, engine, self.connection, is_final) def ShowError(error_text): print(color.Format('[ {error}Error{end} ] '", "or a mock. Useful for UIs that don't support JS.\"\"\"", "Google LLC # # Licensed under the Apache License, Version", "!sudo apt-get -y -qq install postgresql !sudo service postgresql start", "logs_idx = len(predicates) executions = [] sub_bars = [] ip", "'psql': sql_runner = PostgresRunner() elif engine == 'bigquery': EnsureAuthenticatedUser() sql_runner", "service postgresql start') # Ignoring user creation error, as they", "in writing, software # distributed under the License is distributed", "server. print(\"Installing and configuring an empty PostgreSQL database.\") result =", "= input() print(\"project_id is set to %s\" % PROJECT) print(\"You", "-u postgres psql -c \"CREATE USER logica WITH SUPERUSER\"') result", "creation error, as they may already exist. result += 0", "should not be accepting an engine. def __call__(self, sql, engine,", "Logica. !pip install logica # Install postgresql server. 
!sudo apt-get", "'Result']) sub_bars.append(sub_bar) with sub_bar.output_to(0): if SHOW_FULL_QUERY: print( color.Format( 'The following", "bar.output_to(logs_idx): try: sql = program.FormattedPredicateSql(predicate) executions.append(program.execution) ip.push({predicate + '_sql': sql})", "= RunSQL else: raise Exception('Logica only supports BigQuery, PostgreSQL and", "not import google.cloud.bigquery.') try: from google.colab import auth except: BQ_READY", "global TABULATED_OUTPUT global SHOW_FULL_QUERY TABULATED_OUTPUT = tabulated_output SHOW_FULL_QUERY = TABULATED_OUTPUT", "-y -qq install postgresql') result += os.system('sudo service postgresql start')", "Ignoring user creation error, as they may already exist. result", "['(Log)']) logs_idx = len(predicates) executions = [] sub_bars = []", "requires no installation.\\n' 'This could be a good fit for", "def SetPreamble(preamble): global PREAMBLE PREAMBLE = preamble def SetProject(project): global", "bigquery.Client(project=PROJECT) return client.query(sql).to_dataframe() elif engine == 'psql': if is_final: return", "self.connection, is_final) class PostgresRunner(object): def __init__(self): global DB_CONNECTION global DB_ENGINE", "= connection def EnsureAuthenticatedUser(): global USER_AUTHENTICATED global PROJECT if USER_AUTHENTICATED:", "is_final) def ShowError(error_text): print(color.Format('[ {error}Error{end} ] ' + error_text)) def", "engine == 'sqlite': sql_runner = SqliteRunner() elif engine == 'psql':", "= True SHOW_FULL_QUERY = True PREAMBLE = None def SetPreamble(preamble):", "= concertina_lib.ExecuteLogicaProgram( executions, sql_runner=sql_runner, sql_engine=engine) for idx, predicate in enumerate(predicates):", "MockTab() return MockTabBar() @register_cell_magic def logica(line, cell): Logica(line, cell, run_query=True)", "service postgresql start # Prepare database for Logica. !sudo -u", "Logica. !sudo -u postgres psql -c \"CREATE USER logica WITH", "for idx, predicate in enumerate(predicates): with bar.output_to(logs_idx): try: sql =", "except rule_translate.RuleCompileException as e: print('Encountered error when compiling %s.' %", "TODO: Should this be renamed to PSQL_ENGINE, PSQL_CONNECTION? DB_ENGINE =", "RunSQL(sql, engine, self.connection, is_final) class PostgresRunner(object): def __init__(self): global DB_CONNECTION", "activate the tabbar. def PostgresJumpStart(): # Install postgresql server. print(\"Installing", "print('Installation succeeded. Connecting...') # Connect to the database. from logica", "table is stored at {warning}%s{end} ' 'variable.' % predicate)) display(t)", "def Logica(line, cell, run_query): \"\"\"Running Logica predicates and storing results.\"\"\"", "' 'for now.') result_map = concertina_lib.ExecuteLogicaProgram( executions, sql_runner=sql_runner, sql_engine=engine) for", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "BQ_READY = False print('Could not import google.cloud.auth.') try: from google.colab", "= SqliteRunner() elif engine == 'psql': sql_runner = PostgresRunner() elif", "License, Version 2.0 (the \"License\"); # you may not use", "parse from .common import sqlite3_logica BQ_READY = True # By", "was not run.') print(' ') # To activate the tabbar.", "project def SetDbConnection(connection): global DB_CONNECTION DB_CONNECTION = connection def EnsureAuthenticatedUser():", "# You may obtain a copy of the License at", "TabBar(*args): \"\"\"Returns a real TabBar or a mock. 
Useful for", "= bigquery.Client(project=PROJECT) return client.query(sql).to_dataframe() elif engine == 'psql': if is_final:", "SqliteRunner() elif engine == 'psql': sql_runner = PostgresRunner() elif engine", "# TODO: Sqlite runner should not be accepting an engine.", "= program.FormattedPredicateSql(predicate) executions.append(program.execution) ip.push({predicate + '_sql': sql}) except rule_translate.RuleCompileException as", "and SQLite ' 'for now.') class SqliteRunner(object): def __init__(self): self.connection", "engine, is_final): return RunSQL(sql, engine, self.connection, is_final) def ShowError(error_text): print(color.Format('[", "that don't support JS.\"\"\" if TABULATED_OUTPUT: return widgets.TabBar(*args) class MockTab:", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "and configuring an empty PostgreSQL database.\") result = 0 result", "import functors from .compiler import rule_translate from .compiler import universe", "MockTab: def __init__(self): pass def __enter__(self): pass def __exit__(self, *x):", "sub_bars[idx].output_to(1): if run_query: print( color.Format( 'The following table is stored", "-u postgres psql -c \"CREATE USER logica WITH SUPERUSER\" !sudo", "e: e.ShowMessage() return try: program = universe.LogicaProgram(parsed_rules) except functors.FunctorError as", "0: print(\"\"\"Installation failed. Please try the following manually: # Install", "import rule_translate from .compiler import universe import IPython from IPython.core.magic", "sub_bars.append(sub_bar) with sub_bar.output_to(0): if SHOW_FULL_QUERY: print( color.Format( 'The following query", "result_map[predicate] ip.push({predicate: t}) with bar.output_to(idx): with sub_bars[idx].output_to(1): if run_query: print(", "engine, self.connection, is_final) def ShowError(error_text): print(color.Format('[ {error}Error{end} ] ' +", "\\n' 'It is the easiest to run BigQuery requests from", "def SetProject(project): global PROJECT PROJECT = project def SetDbConnection(connection): global", "the License for the specific language governing permissions and #", "'_sql'))) print(sql) else: print('Query is stored at %s variable.' %", "Install Logica. !pip install logica # Install postgresql server. !sudo", "cell, run_query=True) def ParseList(line): line = line.strip() if not line:", "-y -qq install postgresql !sudo service postgresql start # Prepare", "Apache License, Version 2.0 (the \"License\"); # you may not", "-U postgres -c 'CREATE DATABASE logica;' # Connect to the", "2020 Google LLC # # Licensed under the Apache License,", "SHOW_FULL_QUERY = True PREAMBLE = None def SetPreamble(preamble): global PREAMBLE", "either express or implied. # See the License for the", "result += os.system( 'sudo -u postgres psql -U postgres -c", "don't support JS.\"\"\" if TABULATED_OUTPUT: return widgets.TabBar(*args) class MockTab: def", "+= os.system('sudo service postgresql start') # Ignoring user creation error,", "global PROJECT PROJECT = project def SetDbConnection(connection): global DB_CONNECTION DB_CONNECTION", "import sqlite3_logica BQ_READY = True # By default. try: from", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "Prepare database for Logica. !sudo -u postgres psql -c \"CREATE", "is_final: return pandas.read_sql(statements[-1], connection) else: pass return None else: raise", "+ '_sql'))) print(sql) else: print('Query is stored at %s variable.'", "{warning}%s{end} ' 'variable.' 
% ( predicate + '_sql'))) print(sql) else:", "import IPython from IPython.core.magic import register_cell_magic from IPython.display import display", "PROJECT PROJECT = project def SetDbConnection(connection): global DB_CONNECTION DB_CONNECTION =", "% predicate)) display(t) else: print('The query was not run.') print('", "WITH SUPERUSER\"') result += os.system( 'sudo -u postgres psql -c", "query is stored at {warning}%s{end} ' 'variable.' % ( predicate", "USER logica WITH SUPERUSER\" !sudo -u postgres psql -c \"ALTER", "psql -c \"CREATE USER logica WITH SUPERUSER\" !sudo -u postgres", "engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600); connection = engine.connect(); colab_logica.SetDbConnection(connection)\"\"\") return print('Installation", "def __exit__(self, *x): pass class MockTabBar: def __init__(self): pass def", "not be accepting an engine. def __call__(self, sql, engine, is_final):", "= [] sub_bars = [] ip = IPython.get_ipython() for idx,", "query was not run.') print(' ') # To activate the", "if DB_CONNECTION: self.engine = DB_ENGINE self.connection = DB_CONNECTION else: (self.engine,", "if s) parsed_rules = parse.ParseFile(program)['rule'] except parse.ParsingException as e: e.ShowMessage()", "connection = engine.connect(); colab_logica.SetDbConnection(connection)\"\"\") return print('Installation succeeded. Connecting...') # Connect", "print(sql) else: print('Query is stored at %s variable.' % color.Warn(predicate", "postgres -c 'CREATE DATABASE logica;' # Connect to the database.", "def __init__(self): global DB_CONNECTION global DB_ENGINE if DB_CONNECTION: self.engine =", "result != 0: print(\"\"\"Installation failed. Please try the following manually:", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "storing results.\"\"\" predicates = ParseList(line) if not predicates: ShowError('No predicates", "compiling %s.' % predicate) e.ShowMessage() return # Publish output to", ".compiler import rule_translate from .compiler import universe import IPython from", "def EnsureAuthenticatedUser(): global USER_AUTHENTICATED global PROJECT if USER_AUTHENTICATED: return auth.authenticate_user()", "if not line: predicates = [] else: predicates = [p.strip()", "'CREATE DATABASE logica;' # Connect to the database. from logica", "only supports BigQuery, PostgreSQL and SQLite ' 'for now.') class", "pass class MockTabBar: def __init__(self): pass def output_to(self, x): return", "from .compiler import universe import IPython from IPython.core.magic import register_cell_magic", "= None USER_AUTHENTICATED = False TABULATED_OUTPUT = True SHOW_FULL_QUERY =", "self.connection = DB_CONNECTION else: (self.engine, self.connection) = PostgresJumpStart() DB_ENGINE =", "is stored at {warning}%s{end} ' 'variable.' % ( predicate +", "print('Encountered error when compiling %s.' % predicate) e.ShowMessage() return #", "widgets.TabBar(*args) class MockTab: def __init__(self): pass def __enter__(self): pass def", "installed. \\n' 'It is the easiest to run BigQuery requests", "result += os.system('sudo apt-get -y -qq install postgresql') result +=", "= engine.connect(); colab_logica.SetDbConnection(connection)\"\"\") return print('Installation succeeded. 
Connecting...') # Connect to", "universe import IPython from IPython.core.magic import register_cell_magic from IPython.display import", "in enumerate(predicates): t = result_map[predicate] ip.push({predicate: t}) with bar.output_to(idx): with", "'The following query is stored at {warning}%s{end} ' 'variable.' %", "if engine == 'bigquery': client = bigquery.Client(project=PROJECT) return client.query(sql).to_dataframe() elif", "queries.\") PROJECT = input() print(\"project_id is set to %s\" %", ".parser_py import parse from .common import sqlite3_logica BQ_READY = True", "\"License\"); # you may not use this file except in", "SetPreamble(preamble): global PREAMBLE PREAMBLE = preamble def SetProject(project): global PROJECT", "error when compiling %s.' % predicate) e.ShowMessage() return # Publish", "concertina_lib from .compiler import functors from .compiler import rule_translate from", "try: from google.colab import auth except: BQ_READY = False print('Could", "except: WIDGETS_IMPORTED = False print('Could not import google.colab.widgets.') PROJECT =", "== 'bigquery' and not BQ_READY: ShowError( 'BigQuery client and/or authentification", "bar.output_to(logs_idx): if engine == 'sqlite': sql_runner = SqliteRunner() elif engine", "= ';\\n'.join(s for s in [PREAMBLE, cell] if s) parsed_rules", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "= True except: WIDGETS_IMPORTED = False print('Could not import google.colab.widgets.')", "= False print('Could not import google.colab.widgets.') PROJECT = None #", "import bigquery except: BQ_READY = False print('Could not import google.cloud.bigquery.')", "return widgets.TabBar(*args) class MockTab: def __init__(self): pass def __enter__(self): pass", "run_query: print( color.Format( 'The following table is stored at {warning}%s{end}", "-c \"CREATE USER logica WITH SUPERUSER\" !sudo -u postgres psql", "logica WITH SUPERUSER\" !sudo -u postgres psql -c \"ALTER USER", "sub_bar.output_to(0): if SHOW_FULL_QUERY: print( color.Format( 'The following query is stored", "PostgresRunner() elif engine == 'bigquery': EnsureAuthenticatedUser() sql_runner = RunSQL else:", "self.connection def __call__(self, sql, engine, is_final): return RunSQL(sql, engine, self.connection,", "and # limitations under the License. \"\"\"Library for using Logica", "# distributed under the License is distributed on an \"AS", "DB_CONNECTION DB_CONNECTION = connection def EnsureAuthenticatedUser(): global USER_AUTHENTICATED global PROJECT", "idx, predicate in enumerate(predicates): with bar.output_to(logs_idx): try: sql = program.FormattedPredicateSql(predicate)", "installation.\\n' 'This could be a good fit for working with", "'_sql': sql}) except rule_translate.RuleCompileException as e: print('Encountered error when compiling", "be accepting an engine. def __call__(self, sql, engine, is_final): return", "# Unless required by applicable law or agreed to in", "from .compiler import rule_translate from .compiler import universe import IPython", "from IPython.display import display import os import pandas from .parser_py", "* os.system( 'sudo -u postgres psql -c \"CREATE USER logica", "postgresql server. 
!sudo apt-get -y -qq update !sudo apt-get -y", "PROJECT = input() print(\"project_id is set to %s\" % PROJECT)", "None: print(\"Please enter project_id to use for BigQuery queries.\") PROJECT", "run_query): \"\"\"Running Logica predicates and storing results.\"\"\" predicates = ParseList(line)", "try: sql = program.FormattedPredicateSql(predicate) executions.append(program.execution) ip.push({predicate + '_sql': sql}) except", "SetProject(project): global PROJECT PROJECT = project def SetDbConnection(connection): global DB_CONNECTION", "USER_AUTHENTICATED = False TABULATED_OUTPUT = True SHOW_FULL_QUERY = True PREAMBLE", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600); connection = engine.connect(); colab_logica.SetDbConnection(connection)\"\"\") return print('Installation succeeded. Connecting...')", "def logica(line, cell): Logica(line, cell, run_query=True) def ParseList(line): line =", "small data or learning Logica.\\n' 'Use {warning}@Engine(\"sqlite\");{end} annotation in your", "print(\"\"\"Installation failed. Please try the following manually: # Install Logica.", "p in line.split(',')] return predicates def RunSQL(sql, engine, connection=None, is_final=False):", "a mock. Useful for UIs that don't support JS.\"\"\" if", "postgres psql -c \"CREATE USER logica WITH SUPERUSER\" !sudo -u", "You may obtain a copy of the License at #", "WITH SUPERUSER\" !sudo -u postgres psql -c \"ALTER USER logica", "from logica import colab_logica from sqlalchemy import create_engine import pandas", "program = universe.LogicaProgram(parsed_rules) except functors.FunctorError as e: e.ShowMessage() return engine", "sql = program.FormattedPredicateSql(predicate) executions.append(program.execution) ip.push({predicate + '_sql': sql}) except rule_translate.RuleCompileException", "following query is stored at {warning}%s{end} ' 'variable.' % (", "predicate in enumerate(predicates): with bar.output_to(logs_idx): try: sql = program.FormattedPredicateSql(predicate) executions.append(program.execution)", "!sudo apt-get -y -qq update !sudo apt-get -y -qq install", "class SqliteRunner(object): def __init__(self): self.connection = sqlite3_logica.SqliteConnect() # TODO: Sqlite", "engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600) connection = engine.connect() print('Connected.') return engine,", "create_engine import pandas engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600) connection = engine.connect()", "executions.append(program.execution) ip.push({predicate + '_sql': sql}) except rule_translate.RuleCompileException as e: print('Encountered", "% PROJECT) print(\"You can change it with logica.colab_logica.SetProject command.\") USER_AUTHENTICATED", "predicate) e.ShowMessage() return # Publish output to Colab cell. with", "SQLite ' 'for now.') result_map = concertina_lib.ExecuteLogicaProgram( executions, sql_runner=sql_runner, sql_engine=engine)", "sql, engine, is_final): return RunSQL(sql, engine, self.connection, is_final) def ShowError(error_text):", "the Apache License, Version 2.0 (the \"License\"); # you may", "TabBar(predicates + ['(Log)']) logs_idx = len(predicates) executions = [] sub_bars", "TABULATED_OUTPUT = True SHOW_FULL_QUERY = True PREAMBLE = None def", "return # Publish output to Colab cell. with bar.output_to(idx): sub_bar", "stored at {warning}%s{end} ' 'variable.' % ( predicate + '_sql')))", "succeeded. 
Connecting...') # Connect to the database. from logica import", "class MockTabBar: def __init__(self): pass def output_to(self, x): return MockTab()", "parsed_rules = parse.ParseFile(program)['rule'] except parse.ParsingException as e: e.ShowMessage() return try:", "';\\n'.join(s for s in [PREAMBLE, cell] if s) parsed_rules =", "working with small data or learning Logica.\\n' 'Use {warning}@Engine(\"sqlite\");{end} annotation", "'for now.') result_map = concertina_lib.ExecuteLogicaProgram( executions, sql_runner=sql_runner, sql_engine=engine) for idx,", "<gh_stars>0 #!/usr/bin/python # # Copyright 2020 Google LLC # #", "engine == 'sqlite': statements = parse.SplitRaw(sql, ';') connection.executescript(sql) if is_final:", "= tabulated_output SHOW_FULL_QUERY = TABULATED_OUTPUT if not WIDGETS_IMPORTED: SetTabulatedOutput(False) def", "-u postgres psql -c \"ALTER USER logica PASSWORD '<PASSWORD>';\" !sudo" ]
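# Usage sketch (not part of the library above), assuming an IPython/Colab
# session with the `logica` package installed; the predicate name `Greeting`
# and the program text below are illustrative, not taken from the source.
from logica import colab_logica

colab_logica.SetTabulatedOutput(False)  # plain-text output, no Colab widgets

program = '''
@Engine("sqlite");
Greeting(greeting: "Hello world!");
'''

# Equivalent to the %%logica cell magic: compile predicate `Greeting`, run it
# on SQLite, and push the resulting table into the interactive namespace as
# `Greeting` (its SQL is pushed as `Greeting_sql`).
colab_logica.Logica('Greeting', program, run_query=True)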
[ "import Basic as sp_Basic class Basic(sp_Basic): \"\"\"Basic class for Pyccel", "fst): \"\"\"Sets the redbaron fst.\"\"\" self._fst = fst @property def", "Pyccel AST.\"\"\" _fst = None def set_fst(self, fst): \"\"\"Sets the", "redbaron fst.\"\"\" self._fst = fst @property def fst(self): return self._fst", "sp_Basic class Basic(sp_Basic): \"\"\"Basic class for Pyccel AST.\"\"\" _fst =", "class Basic(sp_Basic): \"\"\"Basic class for Pyccel AST.\"\"\" _fst = None", "None def set_fst(self, fst): \"\"\"Sets the redbaron fst.\"\"\" self._fst =", "Basic(sp_Basic): \"\"\"Basic class for Pyccel AST.\"\"\" _fst = None def", "from sympy.core.basic import Basic as sp_Basic class Basic(sp_Basic): \"\"\"Basic class", "for Pyccel AST.\"\"\" _fst = None def set_fst(self, fst): \"\"\"Sets", "the redbaron fst.\"\"\" self._fst = fst @property def fst(self): return", "AST.\"\"\" _fst = None def set_fst(self, fst): \"\"\"Sets the redbaron", "class for Pyccel AST.\"\"\" _fst = None def set_fst(self, fst):", "= None def set_fst(self, fst): \"\"\"Sets the redbaron fst.\"\"\" self._fst", "as sp_Basic class Basic(sp_Basic): \"\"\"Basic class for Pyccel AST.\"\"\" _fst", "sympy.core.basic import Basic as sp_Basic class Basic(sp_Basic): \"\"\"Basic class for", "\"\"\"Sets the redbaron fst.\"\"\" self._fst = fst @property def fst(self):", "def set_fst(self, fst): \"\"\"Sets the redbaron fst.\"\"\" self._fst = fst", "Basic as sp_Basic class Basic(sp_Basic): \"\"\"Basic class for Pyccel AST.\"\"\"", "\"\"\"Basic class for Pyccel AST.\"\"\" _fst = None def set_fst(self,", "_fst = None def set_fst(self, fst): \"\"\"Sets the redbaron fst.\"\"\"", "set_fst(self, fst): \"\"\"Sets the redbaron fst.\"\"\" self._fst = fst @property" ]
[ "assert (x_disc[:, k].max() == len(perc)).all() for i in range(x.shape[1]): if", "0).all() assert (x_disc[:, k].max() == len(perc)).all() for i in range(x.shape[1]):", "assert (x_disc[:, k].min() == 0).all() assert (x_disc[:, k].max() == len(perc)).all()", "= disc.discretize(x) for k, v in disc.names.items(): assert len(v) <=", "disc.discretize(x) for k, v in disc.names.items(): assert len(v) <= len(perc)", "for k, v in disc.names.items(): assert len(v) <= len(perc) +", "25)), list(np.arange(10, 100, 10))] tests = list(product(categorical_features, percentiles)) n_tests =", "@pytest.fixture def cats_and_percentiles(request): cat, perc = tests[request.param] return cat, perc", "= list(disc.names.keys()) assert len(to_disc) == (x.shape[1] - len(cat)) x_disc =", "categorical_features = [[], [1, 3]] percentiles = [list(np.arange(25, 100, 25)),", "<= len(perc) + 1 assert callable(disc.lambdas[k]) assert (x_disc[:, k].min() ==", "perc = cats_and_percentiles disc = Discretizer(x, cat, feature_names, perc) to_disc", "perc @pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True) def test_discretizer(cats_and_percentiles): cat, perc = cats_and_percentiles", "list(disc.names.keys()) assert len(to_disc) == (x.shape[1] - len(cat)) x_disc = disc.discretize(x)", "range(n_features)] categorical_features = [[], [1, 3]] percentiles = [list(np.arange(25, 100,", "assert len(v) <= len(perc) + 1 assert callable(disc.lambdas[k]) assert (x_disc[:,", "= Discretizer(x, cat, feature_names, perc) to_disc = list(disc.names.keys()) assert len(to_disc)", "[str(_) for _ in range(n_features)] categorical_features = [[], [1, 3]]", "len(perc)).all() for i in range(x.shape[1]): if i not in to_disc:", "def test_discretizer(cats_and_percentiles): cat, perc = cats_and_percentiles disc = Discretizer(x, cat,", "= np.random.rand(10, 4) n_features = x.shape[1] feature_names = [str(_) for", "[[], [1, 3]] percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100,", "np.random.rand(10, 4) n_features = x.shape[1] feature_names = [str(_) for _", "product import numpy as np import pytest from alibi_detect.utils.discretizer import", "perc) to_disc = list(disc.names.keys()) assert len(to_disc) == (x.shape[1] - len(cat))", "cat, perc = tests[request.param] return cat, perc @pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True)", "len(v) <= len(perc) + 1 assert callable(disc.lambdas[k]) assert (x_disc[:, k].min()", "cats_and_percentiles(request): cat, perc = tests[request.param] return cat, perc @pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)),", "v in disc.names.items(): assert len(v) <= len(perc) + 1 assert", "x_disc = disc.discretize(x) for k, v in disc.names.items(): assert len(v)", "len(to_disc) == (x.shape[1] - len(cat)) x_disc = disc.discretize(x) for k,", "= [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))] tests = list(product(categorical_features,", "len(perc) + 1 assert callable(disc.lambdas[k]) assert (x_disc[:, k].min() == 0).all()", "perc = tests[request.param] return cat, perc @pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True) def", "import pytest from alibi_detect.utils.discretizer import Discretizer x = np.random.rand(10, 4)", "disc.names.items(): assert len(v) <= len(perc) + 1 assert callable(disc.lambdas[k]) assert", "= cats_and_percentiles disc = Discretizer(x, cat, feature_names, perc) to_disc =", "as np import pytest from alibi_detect.utils.discretizer import Discretizer x =", 
"list(product(categorical_features, percentiles)) n_tests = len(tests) @pytest.fixture def cats_and_percentiles(request): cat, perc", "list(range(n_tests)), indirect=True) def test_discretizer(cats_and_percentiles): cat, perc = cats_and_percentiles disc =", "i not in to_disc: assert (x_disc[:, i] == x[:, i]).all()", "(x.shape[1] - len(cat)) x_disc = disc.discretize(x) for k, v in", "return cat, perc @pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True) def test_discretizer(cats_and_percentiles): cat, perc", "[list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))] tests = list(product(categorical_features, percentiles))", "numpy as np import pytest from alibi_detect.utils.discretizer import Discretizer x", "x.shape[1] feature_names = [str(_) for _ in range(n_features)] categorical_features =", "n_tests = len(tests) @pytest.fixture def cats_and_percentiles(request): cat, perc = tests[request.param]", "from alibi_detect.utils.discretizer import Discretizer x = np.random.rand(10, 4) n_features =", "feature_names, perc) to_disc = list(disc.names.keys()) assert len(to_disc) == (x.shape[1] -", "in range(x.shape[1]): if i not in to_disc: assert (x_disc[:, i]", "cat, perc = cats_and_percentiles disc = Discretizer(x, cat, feature_names, perc)", "k].min() == 0).all() assert (x_disc[:, k].max() == len(perc)).all() for i", "cat, perc @pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True) def test_discretizer(cats_and_percentiles): cat, perc =", "itertools import product import numpy as np import pytest from", "Discretizer(x, cat, feature_names, perc) to_disc = list(disc.names.keys()) assert len(to_disc) ==", "4) n_features = x.shape[1] feature_names = [str(_) for _ in", "Discretizer x = np.random.rand(10, 4) n_features = x.shape[1] feature_names =", "tests[request.param] return cat, perc @pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True) def test_discretizer(cats_and_percentiles): cat,", "disc = Discretizer(x, cat, feature_names, perc) to_disc = list(disc.names.keys()) assert", "3]] percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))] tests", "for _ in range(n_features)] categorical_features = [[], [1, 3]] percentiles", "@pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True) def test_discretizer(cats_and_percentiles): cat, perc = cats_and_percentiles disc", "in disc.names.items(): assert len(v) <= len(perc) + 1 assert callable(disc.lambdas[k])", "(x_disc[:, k].max() == len(perc)).all() for i in range(x.shape[1]): if i", "k].max() == len(perc)).all() for i in range(x.shape[1]): if i not", "pytest from alibi_detect.utils.discretizer import Discretizer x = np.random.rand(10, 4) n_features", "k, v in disc.names.items(): assert len(v) <= len(perc) + 1", "+ 1 assert callable(disc.lambdas[k]) assert (x_disc[:, k].min() == 0).all() assert", "list(np.arange(10, 100, 10))] tests = list(product(categorical_features, percentiles)) n_tests = len(tests)", "= [str(_) for _ in range(n_features)] categorical_features = [[], [1,", "len(tests) @pytest.fixture def cats_and_percentiles(request): cat, perc = tests[request.param] return cat,", "in range(n_features)] categorical_features = [[], [1, 3]] percentiles = [list(np.arange(25,", "x = np.random.rand(10, 4) n_features = x.shape[1] feature_names = [str(_)", "= tests[request.param] return cat, perc @pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True) def 
test_discretizer(cats_and_percentiles):", "== len(perc)).all() for i in range(x.shape[1]): if i not in", "if i not in to_disc: assert (x_disc[:, i] == x[:,", "i in range(x.shape[1]): if i not in to_disc: assert (x_disc[:,", "range(x.shape[1]): if i not in to_disc: assert (x_disc[:, i] ==", "percentiles)) n_tests = len(tests) @pytest.fixture def cats_and_percentiles(request): cat, perc =", "[1, 3]] percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))]", "assert len(to_disc) == (x.shape[1] - len(cat)) x_disc = disc.discretize(x) for", "import product import numpy as np import pytest from alibi_detect.utils.discretizer", "indirect=True) def test_discretizer(cats_and_percentiles): cat, perc = cats_and_percentiles disc = Discretizer(x,", "def cats_and_percentiles(request): cat, perc = tests[request.param] return cat, perc @pytest.mark.parametrize('cats_and_percentiles',", "= len(tests) @pytest.fixture def cats_and_percentiles(request): cat, perc = tests[request.param] return", "to_disc = list(disc.names.keys()) assert len(to_disc) == (x.shape[1] - len(cat)) x_disc", "100, 25)), list(np.arange(10, 100, 10))] tests = list(product(categorical_features, percentiles)) n_tests", "cats_and_percentiles disc = Discretizer(x, cat, feature_names, perc) to_disc = list(disc.names.keys())", "10))] tests = list(product(categorical_features, percentiles)) n_tests = len(tests) @pytest.fixture def", "1 assert callable(disc.lambdas[k]) assert (x_disc[:, k].min() == 0).all() assert (x_disc[:,", "import Discretizer x = np.random.rand(10, 4) n_features = x.shape[1] feature_names", "import numpy as np import pytest from alibi_detect.utils.discretizer import Discretizer", "= [[], [1, 3]] percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10,", "= list(product(categorical_features, percentiles)) n_tests = len(tests) @pytest.fixture def cats_and_percentiles(request): cat,", "feature_names = [str(_) for _ in range(n_features)] categorical_features = [[],", "tests = list(product(categorical_features, percentiles)) n_tests = len(tests) @pytest.fixture def cats_and_percentiles(request):", "_ in range(n_features)] categorical_features = [[], [1, 3]] percentiles =", "== (x.shape[1] - len(cat)) x_disc = disc.discretize(x) for k, v", "alibi_detect.utils.discretizer import Discretizer x = np.random.rand(10, 4) n_features = x.shape[1]", "- len(cat)) x_disc = disc.discretize(x) for k, v in disc.names.items():", "test_discretizer(cats_and_percentiles): cat, perc = cats_and_percentiles disc = Discretizer(x, cat, feature_names,", "percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))] tests =", "cat, feature_names, perc) to_disc = list(disc.names.keys()) assert len(to_disc) == (x.shape[1]", "np import pytest from alibi_detect.utils.discretizer import Discretizer x = np.random.rand(10,", "(x_disc[:, k].min() == 0).all() assert (x_disc[:, k].max() == len(perc)).all() for", "= x.shape[1] feature_names = [str(_) for _ in range(n_features)] categorical_features", "100, 10))] tests = list(product(categorical_features, percentiles)) n_tests = len(tests) @pytest.fixture", "n_features = x.shape[1] feature_names = [str(_) for _ in range(n_features)]", "from itertools import product import numpy as np import pytest", "for i in range(x.shape[1]): if i not in to_disc: assert", "callable(disc.lambdas[k]) assert (x_disc[:, k].min() == 0).all() assert (x_disc[:, k].max() ==", "== 0).all() assert (x_disc[:, k].max() == len(perc)).all() for i in", "assert callable(disc.lambdas[k]) assert (x_disc[:, 
k].min() == 0).all() assert (x_disc[:, k].max()", "len(cat)) x_disc = disc.discretize(x) for k, v in disc.names.items(): assert" ]
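# Standalone sketch of the API exercised by the test above, assuming the same
# alibi_detect version; the feature split and names are illustrative.
import numpy as np
from alibi_detect.utils.discretizer import Discretizer

data = np.random.rand(6, 3)
disc = Discretizer(data, [2], ['f0', 'f1', 'f2'], list(np.arange(25, 100, 25)))
binned = disc.discretize(data)
# f0 and f1 are replaced by quartile bin ids 0..3; the categorical column f2
# passes through unchanged, and disc.names maps column index -> bin labels.
assert (binned[:, 2] == data[:, 2]).all()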
[ "2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_2\") dim1.value = 0.3 dim2.value = 2", "p2.value) p1.value = 3 p2.value = -0.1 result = ps.run_process(func)", "= [\"0.1_1\", \"0.2_1\", \"0.3_1\", \"0.1_2\", \"0.2_2\", \"0.3_2\"] ps = ParameterSpace(\"ps\")", "dim2.value = 1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_1\") dim1.value = 0.1 dim2.value", "= [0.1,0.2,0.3,0.4, 0.5] ps = ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2, dim3]) ps.set_current_path_template(\"file_%%dim1%%_%%dim2:INDEX%%\")", "complete tests see C++ tests for parameter space def test_common_id(self):", "= 0.1 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_2\") dim1.value =", "dim3]) ps.set_current_path_template(\"file_%%dim1%%_%%dim2:INDEX%%\") dim1.value=0.2 dim2.value=0.2 self.assertEqual(ps.get_current_relative_path(), 'file_0.2_1') # TODO ML complete", "0.3 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.3_2\") if __name__ ==", "ps.set_current_path_template(\"file_%%dim1%%_%%dim2:INDEX%%\") dim1.value=0.2 dim2.value=0.2 self.assertEqual(ps.get_current_relative_path(), 'file_0.2_1') # TODO ML complete tests", "= [-0.3,-0.2, -0.1, 0] ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) ps.enable_cache(\"ps_test\")", "1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_1\") dim1.value = 0.1 dim2.value = 2", "self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_1\") dim1.value = 0.1 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1,", "# -*- coding: utf-8 -*- \"\"\" Created on Mon Jun", "ps.enable_cache(\"ps_test\") def func(param1, param2): return param1 * param2 ps.sweep(func) def", "def test_parameter(self): p1 = Parameter(\"param1\") p2 = Parameter(\"param2\") ps =", "return param1 * param2 ps.sweep(func) def test_data_directories(self): dim1 = Parameter(\"dim1\")", "= [1,1,1,2,2,2] dim2.ids = [\"0.1_1\", \"0.2_1\", \"0.3_1\", \"0.1_2\", \"0.2_2\", \"0.3_2\"]", "[1,1,1,2,2,2] dim2.ids = [\"0.1_1\", \"0.2_1\", \"0.3_1\", \"0.1_2\", \"0.2_2\", \"0.3_2\"] ps", "p1.value * p2.value) p1.value = 3 p2.value = -0.1 result", "dim2]) dim1.value = 0.1 dim2.value = 1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_1\")", "0.1 dim2.value = 1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_1\") dim1.value = 0.2", "* param2 result = ps.run_process(func) self.assertAlmostEqual(result, p1.value * p2.value) p1.value", "ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def test_process(self): p1 = Parameter(\"param1\") p1.values =", "= Parameter(\"param2\") ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def test_process(self): p1", "\"0.3_1\" ,\"0.3_2\"] dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [1,1,1,2,2,2] dim2.ids", "C++ tests for parameter space def test_common_id(self): dim1 = Parameter(\"dim1\")", "= 1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_1\") dim1.value = 0.1 dim2.value =", "test_sweep_cache(self): p1 = Parameter(\"param1\") p1.values = [0, 1,2,3,4] p2 =", "= [0.1,0.2,0.3,0.4, 0.5] dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [0.1,0.2,0.3,0.4,", "* class ParameterSpaceTest(unittest.TestCase): def test_parameter(self): p1 = Parameter(\"param1\") p2 =", "dim1.ids = [\"0.1_1\" ,\"0.1_2\",\"0.2_1\" ,\"0.2_2\", \"0.3_1\" ,\"0.3_2\"] dim2 = Parameter(\"dim2\")", "[-0.3,-0.2, -0.1, 0] ps = ParameterSpace(\"ps\") 
ps.register_parameters([p1, p2]) ps.enable_cache(\"ps_test\") def", "test_process(self): p1 = Parameter(\"param1\") p1.values = [0, 1,2,3,4] p2 =", "\"0.3_1\", \"0.1_2\", \"0.2_2\", \"0.3_2\"] ps = ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2]) dim1.value", "ps.register_parameters([p1, p2]) def test_process(self): p1 = Parameter(\"param1\") p1.values = [0,", "dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [0.1,0.2,0.3,0.4, 0.5] dim3 = Parameter(\"dim3\") dim3.set_space_representation_type(parameter_space_representation_types.ID) dim2.values", "= Parameter(\"dim1\") dim1.values = [0.1,0.2,0.3,0.4, 0.5] dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX)", "[0.1,0.2,0.3,0.4, 0.5] dim3 = Parameter(\"dim3\") dim3.set_space_representation_type(parameter_space_representation_types.ID) dim2.values = [0.1,0.2,0.3,0.4, 0.5]", "dim1.value = 0.1 dim2.value = 1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_1\") dim1.value", "self.assertAlmostEqual(result, p1.value * p2.value) p1.value = 3 p2.value = -0.1", "ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def func(param1, param2): return param1", "dim2.values = [0.1,0.2,0.3,0.4, 0.5] dim3 = Parameter(\"dim3\") dim3.set_space_representation_type(parameter_space_representation_types.ID) dim2.values =", "-*- \"\"\" Created on Mon Jun 14 11:49:43 2021 @author:", "ps = ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2, dim3]) ps.set_current_path_template(\"file_%%dim1%%_%%dim2:INDEX%%\") dim1.value=0.2 dim2.value=0.2 self.assertEqual(ps.get_current_relative_path(),", "from tinc import * class ParameterSpaceTest(unittest.TestCase): def test_parameter(self): p1 =", "dim2.value=0.2 self.assertEqual(ps.get_current_relative_path(), 'file_0.2_1') # TODO ML complete tests see C++", "= ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2, dim3]) ps.set_current_path_template(\"file_%%dim1%%_%%dim2:INDEX%%\") dim1.value=0.2 dim2.value=0.2 self.assertEqual(ps.get_current_relative_path(), 'file_0.2_1')", "def test_data_directories(self): dim1 = Parameter(\"dim1\") dim1.values = [0.1,0.2,0.3,0.4, 0.5] dim2", "coding: utf-8 -*- \"\"\" Created on Mon Jun 14 11:49:43", "= Parameter(\"param2\") p2.values = [-0.3,-0.2, -0.1, 0] ps = ParameterSpace(\"ps\")", "p1.value * p2.value) p1.value = 3 p2.value = -0.1 def", "2021 @author: Andres \"\"\" import sys,time import unittest from tinc", "test_parameter(self): p1 = Parameter(\"param1\") p2 = Parameter(\"param2\") ps = ParameterSpace(\"ps\")", "-0.1, 0] ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) ps.enable_cache(\"ps_test\") def func(param1,", "Parameter(\"dim1\") dim1.values = [0.1,0.2,0.3,0.4, 0.5] dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values", "Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [0.1,0.2,0.3,0.4, 0.5] dim3 = Parameter(\"dim3\") dim3.set_space_representation_type(parameter_space_representation_types.ID)", "def test_sweep_cache(self): p1 = Parameter(\"param1\") p1.values = [0, 1,2,3,4] p2", "dim2]), \"0.1_1\") dim1.value = 0.2 dim2.value = 1 self.assertEqual(ps.get_common_id([dim1, dim2]),", "dim1.value = 0.2 dim2.value = 1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_1\") dim1.value", "= 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_2\") dim1.value = 0.2 dim2.value =", "dim3 = Parameter(\"dim3\") 
dim3.set_space_representation_type(parameter_space_representation_types.ID) dim2.values = [0.1,0.2,0.3,0.4, 0.5] ps =", "2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_2\") dim1.value = 0.2 dim2.value = 2", "'file_0.2_1') # TODO ML complete tests see C++ tests for", "dim2.ids = [\"0.1_1\", \"0.2_1\", \"0.3_1\", \"0.1_2\", \"0.2_2\", \"0.3_2\"] ps =", "import * class ParameterSpaceTest(unittest.TestCase): def test_parameter(self): p1 = Parameter(\"param1\") p2", "ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def func(param1, param2): return param1 * param2", "[0.1, 0.1, 0.2, 0.2, 0.3, 0.3] dim1.ids = [\"0.1_1\" ,\"0.1_2\",\"0.2_1\"", "= 0.2 dim2.value = 1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_1\") dim1.value =", "= ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2]) dim1.value = 0.1 dim2.value = 1", "Parameter(\"param1\") p1.values = [0, 1,2,3,4] p2 = Parameter(\"param2\") p2.values =", "0.5] ps = ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2, dim3]) ps.set_current_path_template(\"file_%%dim1%%_%%dim2:INDEX%%\") dim1.value=0.2 dim2.value=0.2", "0.2 dim2.value = 1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_1\") dim1.value = 0.1", "\"\"\" import sys,time import unittest from tinc import * class", "utf-8 -*- \"\"\" Created on Mon Jun 14 11:49:43 2021", "14 11:49:43 2021 @author: Andres \"\"\" import sys,time import unittest", "ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def test_process(self): p1 = Parameter(\"param1\")", "1,2,3,4] p2 = Parameter(\"param2\") p2.values = [-0.3,-0.2, -0.1, 0] ps", "ps.register_parameters([p1, p2]) def func(param1, param2): return param1 * param2 result", "self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_2\") dim1.value = 0.2 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1,", "p2 = Parameter(\"param2\") ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def test_process(self):", "tinc import * class ParameterSpaceTest(unittest.TestCase): def test_parameter(self): p1 = Parameter(\"param1\")", "tests for parameter space def test_common_id(self): dim1 = Parameter(\"dim1\") dim1.values", "Parameter(\"dim1\") dim1.values = [0.1, 0.1, 0.2, 0.2, 0.3, 0.3] dim1.ids", "dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [1,1,1,2,2,2] dim2.ids = [\"0.1_1\", \"0.2_1\", \"0.3_1\", \"0.1_2\",", "[-0.3,-0.2, -0.1, 0] ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def func(param1,", "TODO ML complete tests see C++ tests for parameter space", "test_data_directories(self): dim1 = Parameter(\"dim1\") dim1.values = [0.1,0.2,0.3,0.4, 0.5] dim2 =", "dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [0.1,0.2,0.3,0.4, 0.5] dim3 =", "dim2.value = 1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_1\") dim1.value = 0.2 dim2.value", "= Parameter(\"dim1\") dim1.values = [0.1, 0.1, 0.2, 0.2, 0.3, 0.3]", "\"0.2_1\", \"0.3_1\", \"0.1_2\", \"0.2_2\", \"0.3_2\"] ps = ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2])", "= 0.2 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_2\") dim1.value =", "= 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_2\") dim1.value = 0.3 dim2.value =", "* param2 ps.sweep(func) def test_data_directories(self): dim1 = Parameter(\"dim1\") dim1.values =", "dim1.values = [0.1,0.2,0.3,0.4, 0.5] dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) 
dim2.values =", "= 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.3_2\") if __name__ == '__main__': unittest.main()", "param1 * param2 result = ps.run_process(func) self.assertAlmostEqual(result, p1.value * p2.value)", "1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_1\") dim1.value = 0.2 dim2.value = 1", "param2): return param1 * param2 ps.sweep(func) def test_data_directories(self): dim1 =", "0] ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) ps.enable_cache(\"ps_test\") def func(param1, param2):", "Jun 14 11:49:43 2021 @author: Andres \"\"\" import sys,time import", "0.1 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_2\") dim1.value = 0.2", "p1.values = [0, 1,2,3,4] p2 = Parameter(\"param2\") p2.values = [-0.3,-0.2,", "dim2.values = [0.1,0.2,0.3,0.4, 0.5] ps = ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2, dim3])", "func(param1, param2): return param1 * param2 result = ps.run_process(func) self.assertAlmostEqual(result,", "3 p2.value = -0.1 result = ps.run_process(func) self.assertAlmostEqual(result, p1.value *", "= Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [0.1,0.2,0.3,0.4, 0.5] dim3 = Parameter(\"dim3\")", ",\"0.1_2\",\"0.2_1\" ,\"0.2_2\", \"0.3_1\" ,\"0.3_2\"] dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values =", "= [0, 1,2,3,4] p2 = Parameter(\"param2\") p2.values = [-0.3,-0.2, -0.1,", "def func(param1, param2): return param1 * param2 result = ps.run_process(func)", "dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_2\") dim1.value = 0.2 dim2.value", "ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2, dim3]) ps.set_current_path_template(\"file_%%dim1%%_%%dim2:INDEX%%\") dim1.value=0.2 dim2.value=0.2 self.assertEqual(ps.get_current_relative_path(), 'file_0.2_1') #", "p2.value) p1.value = 3 p2.value = -0.1 def test_sweep_cache(self): p1", "def test_process(self): p1 = Parameter(\"param1\") p1.values = [0, 1,2,3,4] p2", "dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [1,1,1,2,2,2] dim2.ids = [\"0.1_1\",", "= 3 p2.value = -0.1 def test_sweep_cache(self): p1 = Parameter(\"param1\")", "0.5] dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [0.1,0.2,0.3,0.4, 0.5] dim3", "= -0.1 def test_sweep_cache(self): p1 = Parameter(\"param1\") p1.values = [0,", "for parameter space def test_common_id(self): dim1 = Parameter(\"dim1\") dim1.values =", "param1 * param2 ps.sweep(func) def test_data_directories(self): dim1 = Parameter(\"dim1\") dim1.values", "\"0.1_2\") dim1.value = 0.2 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_2\")", "\"\"\" Created on Mon Jun 14 11:49:43 2021 @author: Andres", "p2 = Parameter(\"param2\") p2.values = [-0.3,-0.2, -0.1, 0] ps =", "= ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def func(param1, param2): return param1 *", "\"0.3_2\"] ps = ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2]) dim1.value = 0.1 dim2.value", "0.2, 0.2, 0.3, 0.3] dim1.ids = [\"0.1_1\" ,\"0.1_2\",\"0.2_1\" ,\"0.2_2\", \"0.3_1\"", "\"0.2_2\") dim1.value = 0.3 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.3_2\")", "Created on Mon Jun 14 11:49:43 2021 @author: Andres \"\"\"", "return param1 * param2 result = ps.run_process(func) self.assertAlmostEqual(result, p1.value 
*", "ps = ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2]) dim1.value = 0.1 dim2.value =", "= ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def test_process(self): p1 = Parameter(\"param1\") p1.values", "p1.value = 3 p2.value = -0.1 def test_sweep_cache(self): p1 =", "= Parameter(\"dim3\") dim3.set_space_representation_type(parameter_space_representation_types.ID) dim2.values = [0.1,0.2,0.3,0.4, 0.5] ps = ParameterSpace(\"ps\")", "p2]) def func(param1, param2): return param1 * param2 result =", "test_common_id(self): dim1 = Parameter(\"dim1\") dim1.values = [0.1, 0.1, 0.2, 0.2,", "def test_common_id(self): dim1 = Parameter(\"dim1\") dim1.values = [0.1, 0.1, 0.2,", "space def test_common_id(self): dim1 = Parameter(\"dim1\") dim1.values = [0.1, 0.1,", "Mon Jun 14 11:49:43 2021 @author: Andres \"\"\" import sys,time", "p2]) def test_process(self): p1 = Parameter(\"param1\") p1.values = [0, 1,2,3,4]", "sys,time import unittest from tinc import * class ParameterSpaceTest(unittest.TestCase): def", "param2): return param1 * param2 result = ps.run_process(func) self.assertAlmostEqual(result, p1.value", "ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) ps.enable_cache(\"ps_test\") def func(param1, param2): return", "0.3, 0.3] dim1.ids = [\"0.1_1\" ,\"0.1_2\",\"0.2_1\" ,\"0.2_2\", \"0.3_1\" ,\"0.3_2\"] dim2", "self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_2\") dim1.value = 0.3 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1,", "p1 = Parameter(\"param1\") p2 = Parameter(\"param2\") ps = ParameterSpace(\"ps\") ps.register_parameters([p1,", "p2.values = [-0.3,-0.2, -0.1, 0] ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2])", "# TODO ML complete tests see C++ tests for parameter", "= Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [1,1,1,2,2,2] dim2.ids = [\"0.1_1\", \"0.2_1\",", "ps.register_parameters([dim1, dim2]) dim1.value = 0.1 dim2.value = 1 self.assertEqual(ps.get_common_id([dim1, dim2]),", "p2.value = -0.1 result = ps.run_process(func) self.assertAlmostEqual(result, p1.value * p2.value)", "dim1.value=0.2 dim2.value=0.2 self.assertEqual(ps.get_current_relative_path(), 'file_0.2_1') # TODO ML complete tests see", "see C++ tests for parameter space def test_common_id(self): dim1 =", "dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.3_2\") if __name__ == '__main__':", "* p2.value) p1.value = 3 p2.value = -0.1 def test_sweep_cache(self):", "parameter space def test_common_id(self): dim1 = Parameter(\"dim1\") dim1.values = [0.1,", "= Parameter(\"param1\") p1.values = [0, 1,2,3,4] p2 = Parameter(\"param2\") p2.values", "= Parameter(\"param1\") p2 = Parameter(\"param2\") ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2])", "= 1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_1\") dim1.value = 0.2 dim2.value =", "dim1.value = 0.2 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_2\") dim1.value", "param2 result = ps.run_process(func) self.assertAlmostEqual(result, p1.value * p2.value) p1.value =", "dim2, dim3]) ps.set_current_path_template(\"file_%%dim1%%_%%dim2:INDEX%%\") dim1.value=0.2 dim2.value=0.2 self.assertEqual(ps.get_current_relative_path(), 'file_0.2_1') # TODO ML", "Parameter(\"param2\") ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def test_process(self): p1 =", "= -0.1 result = ps.run_process(func) self.assertAlmostEqual(result, p1.value * p2.value) p1.value", "dim1.value = 0.3 dim2.value = 2 
self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.3_2\") if", "= [-0.3,-0.2, -0.1, 0] ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def", "dim1 = Parameter(\"dim1\") dim1.values = [0.1,0.2,0.3,0.4, 0.5] dim2 = Parameter(\"dim2\")", "\"0.1_1\") dim1.value = 0.2 dim2.value = 1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_1\")", "dim1.values = [0.1, 0.1, 0.2, 0.2, 0.3, 0.3] dim1.ids =", "self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_1\") dim1.value = 0.2 dim2.value = 1 self.assertEqual(ps.get_common_id([dim1,", "param2 ps.sweep(func) def test_data_directories(self): dim1 = Parameter(\"dim1\") dim1.values = [0.1,0.2,0.3,0.4,", "Parameter(\"param2\") p2.values = [-0.3,-0.2, -0.1, 0] ps = ParameterSpace(\"ps\") ps.register_parameters([p1,", "dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_2\") dim1.value = 0.3 dim2.value", "ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2]) dim1.value = 0.1 dim2.value = 1 self.assertEqual(ps.get_common_id([dim1,", "on Mon Jun 14 11:49:43 2021 @author: Andres \"\"\" import", "self.assertEqual(ps.get_current_relative_path(), 'file_0.2_1') # TODO ML complete tests see C++ tests", "dim1 = Parameter(\"dim1\") dim1.values = [0.1, 0.1, 0.2, 0.2, 0.3,", ",\"0.3_2\"] dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [1,1,1,2,2,2] dim2.ids =", "ps.register_parameters([dim1, dim2, dim3]) ps.set_current_path_template(\"file_%%dim1%%_%%dim2:INDEX%%\") dim1.value=0.2 dim2.value=0.2 self.assertEqual(ps.get_current_relative_path(), 'file_0.2_1') # TODO", "3 p2.value = -0.1 def test_sweep_cache(self): p1 = Parameter(\"param1\") p1.values", "result = ps.run_process(func) self.assertAlmostEqual(result, p1.value * p2.value) p1.value = 3", "\"0.2_2\", \"0.3_2\"] ps = ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2]) dim1.value = 0.1", "dim2]), \"0.2_2\") dim1.value = 0.3 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]),", "Parameter(\"param1\") p2 = Parameter(\"param2\") ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def", "@author: Andres \"\"\" import sys,time import unittest from tinc import", "[0, 1,2,3,4] p2 = Parameter(\"param2\") p2.values = [-0.3,-0.2, -0.1, 0]", "-0.1, 0] ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def func(param1, param2):", "[0.1,0.2,0.3,0.4, 0.5] dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [0.1,0.2,0.3,0.4, 0.5]", "p1 = Parameter(\"param1\") p1.values = [0, 1,2,3,4] p2 = Parameter(\"param2\")", "ML complete tests see C++ tests for parameter space def", "0.2, 0.3, 0.3] dim1.ids = [\"0.1_1\" ,\"0.1_2\",\"0.2_1\" ,\"0.2_2\", \"0.3_1\" ,\"0.3_2\"]", "= [\"0.1_1\" ,\"0.1_2\",\"0.2_1\" ,\"0.2_2\", \"0.3_1\" ,\"0.3_2\"] dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX)", "dim2.values = [1,1,1,2,2,2] dim2.ids = [\"0.1_1\", \"0.2_1\", \"0.3_1\", \"0.1_2\", \"0.2_2\",", "\"0.1_2\", \"0.2_2\", \"0.3_2\"] ps = ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2]) dim1.value =", "= 3 p2.value = -0.1 result = ps.run_process(func) self.assertAlmostEqual(result, p1.value", "func(param1, param2): return param1 * param2 ps.sweep(func) def test_data_directories(self): dim1", "import sys,time import unittest from tinc import * class ParameterSpaceTest(unittest.TestCase):", "= [0.1,0.2,0.3,0.4, 0.5] dim3 = Parameter(\"dim3\") 
dim3.set_space_representation_type(parameter_space_representation_types.ID) dim2.values = [0.1,0.2,0.3,0.4,", "dim2]), \"0.2_1\") dim1.value = 0.1 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]),", "= [0.1, 0.1, 0.2, 0.2, 0.3, 0.3] dim1.ids = [\"0.1_1\"", "\"0.2_1\") dim1.value = 0.1 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_2\")", "p2]) ps.enable_cache(\"ps_test\") def func(param1, param2): return param1 * param2 ps.sweep(func)", "ps.sweep(func) def test_data_directories(self): dim1 = Parameter(\"dim1\") dim1.values = [0.1,0.2,0.3,0.4, 0.5]", "p1.value = 3 p2.value = -0.1 result = ps.run_process(func) self.assertAlmostEqual(result,", "0.1, 0.2, 0.2, 0.3, 0.3] dim1.ids = [\"0.1_1\" ,\"0.1_2\",\"0.2_1\" ,\"0.2_2\",", "import unittest from tinc import * class ParameterSpaceTest(unittest.TestCase): def test_parameter(self):", ",\"0.2_2\", \"0.3_1\" ,\"0.3_2\"] dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [1,1,1,2,2,2]", "dim2]), \"0.1_2\") dim1.value = 0.2 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]),", "ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) ps.enable_cache(\"ps_test\") def func(param1, param2): return param1 *", "unittest from tinc import * class ParameterSpaceTest(unittest.TestCase): def test_parameter(self): p1", "= ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) ps.enable_cache(\"ps_test\") def func(param1, param2): return param1", "Andres \"\"\" import sys,time import unittest from tinc import *", "Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values = [1,1,1,2,2,2] dim2.ids = [\"0.1_1\", \"0.2_1\", \"0.3_1\",", "0.2 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.2_2\") dim1.value = 0.3", "= 0.3 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.3_2\") if __name__", "0] ps = ParameterSpace(\"ps\") ps.register_parameters([p1, p2]) def func(param1, param2): return", "class ParameterSpaceTest(unittest.TestCase): def test_parameter(self): p1 = Parameter(\"param1\") p2 = Parameter(\"param2\")", "0.3] dim1.ids = [\"0.1_1\" ,\"0.1_2\",\"0.2_1\" ,\"0.2_2\", \"0.3_1\" ,\"0.3_2\"] dim2 =", "[0.1,0.2,0.3,0.4, 0.5] ps = ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2, dim3]) ps.set_current_path_template(\"file_%%dim1%%_%%dim2:INDEX%%\") dim1.value=0.2", "0.5] dim3 = Parameter(\"dim3\") dim3.set_space_representation_type(parameter_space_representation_types.ID) dim2.values = [0.1,0.2,0.3,0.4, 0.5] ps", "[\"0.1_1\" ,\"0.1_2\",\"0.2_1\" ,\"0.2_2\", \"0.3_1\" ,\"0.3_2\"] dim2 = Parameter(\"dim2\") dim2.set_space_representation_type(parameter_space_representation_types.INDEX) dim2.values", "dim1.value = 0.1 dim2.value = 2 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_2\") dim1.value", "ParameterSpaceTest(unittest.TestCase): def test_parameter(self): p1 = Parameter(\"param1\") p2 = Parameter(\"param2\") ps", "def func(param1, param2): return param1 * param2 ps.sweep(func) def test_data_directories(self):", "tests see C++ tests for parameter space def test_common_id(self): dim1", "Parameter(\"dim3\") dim3.set_space_representation_type(parameter_space_representation_types.ID) dim2.values = [0.1,0.2,0.3,0.4, 0.5] ps = ParameterSpace(\"ps\") ps.register_parameters([dim1,", "= 0.1 dim2.value = 1 self.assertEqual(ps.get_common_id([dim1, dim2]), \"0.1_1\") dim1.value =", "p2.value = -0.1 def test_sweep_cache(self): p1 = 
Parameter(\"param1\") p1.values =", "11:49:43 2021 @author: Andres \"\"\" import sys,time import unittest from", "dim3.set_space_representation_type(parameter_space_representation_types.ID) dim2.values = [0.1,0.2,0.3,0.4, 0.5] ps = ParameterSpace(\"ps\") ps.register_parameters([dim1, dim2,", "-0.1 def test_sweep_cache(self): p1 = Parameter(\"param1\") p1.values = [0, 1,2,3,4]", "-0.1 result = ps.run_process(func) self.assertAlmostEqual(result, p1.value * p2.value) p1.value =", "ps.register_parameters([p1, p2]) ps.enable_cache(\"ps_test\") def func(param1, param2): return param1 * param2", "-*- coding: utf-8 -*- \"\"\" Created on Mon Jun 14", "* p2.value) p1.value = 3 p2.value = -0.1 result =", "= ps.run_process(func) self.assertAlmostEqual(result, p1.value * p2.value) p1.value = 3 p2.value", "[\"0.1_1\", \"0.2_1\", \"0.3_1\", \"0.1_2\", \"0.2_2\", \"0.3_2\"] ps = ParameterSpace(\"ps\") ps.register_parameters([dim1,", "ps.run_process(func) self.assertAlmostEqual(result, p1.value * p2.value) p1.value = 3 p2.value =" ]
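The single assertion in test_data_directories pins down the template rule: %%name%% expands to a parameter's current value, while %%name:INDEX%% expands to that value's position inside values. The sketch below re-derives 'file_0.2_1' from that rule alone; resolve_template is a hypothetical stand-in for illustration, not tinc's actual implementation:

# Hypothetical re-implementation of the path-template rule, for illustration.
def resolve_template(template, params):
    """params maps a parameter name to (values, current_value)."""
    out = template
    for name, (values, current) in params.items():
        index = values.index(current)
        out = out.replace(f"%%{name}:INDEX%%", str(index))  # INDEX representation
        out = out.replace(f"%%{name}%%", str(current))      # VALUE representation
    return out

values = [0.1, 0.2, 0.3, 0.4, 0.5]
print(resolve_template("file_%%dim1%%_%%dim2:INDEX%%",
                       {"dim1": (values, 0.2), "dim2": (values, 0.2)}))
# file_0.2_1 -- dim1 contributes its value, dim2 the index of 0.2 in values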
[ "agent, ENV_NAME): running_reward_array = [] for episode in range(episodes): reward", "== 'cart': init_env = gym.make('CartPole-v1') dim_in = init_env.observation_space.shape[0] dim_out =", "'cart': init_env = gym.make('CartPole-v1') dim_in = init_env.observation_space.shape[0] dim_out = init_env.action_space.n", "env = gym.make('LunarLander-v2') elif ENV_NAME == 'cart': env = gym.make('CartPole-v1')", "== 'lunar': init_env = gym.make('LunarLander-v2') dim_in = init_env.observation_space.shape[0] dim_out =", "def run_episode(q, agent_in, ENV_NAME, seed=0): agent = agent_in.duplicate() if ENV_NAME", "\"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"-a\", \"--agent_type\", help=\"architecture of agent to", "default='ddt') parser.add_argument(\"-e\", \"--episodes\", help=\"how many episodes\", type=int, default=2000) parser.add_argument(\"-l\", \"--num_leaves\",", "raise Exception('No valid environment selected') done = False torch.manual_seed(seed) env.seed(seed)", "done = False torch.manual_seed(seed) env.seed(seed) np.random.seed(seed) env.action_space.seed(seed) random.seed(seed) state =", "gym.make('CartPole-v1') else: raise Exception('No valid environment selected') done = False", "hidden layers for MLP \", type=int, default=0) parser.add_argument(\"-env\", \"--env_type\", help=\"environment", "if episode % 500 == 0: agent.save('../models/'+str(episode)+'th') return running_reward_array if", "agent = agent_in.duplicate() if ENV_NAME == 'lunar': env = gym.make('LunarLander-v2')", "if __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"-a\", \"--agent_type\", help=\"architecture", "to_return return to_return def main(episodes, agent, ENV_NAME): running_reward_array = []", "ENV_NAME, seed=0): agent = agent_in.duplicate() if ENV_NAME == 'lunar': env", "USE_GPU = args.gpu # Applies for 'prolo' only. 
use gpu?", "Step through environment using chosen action state, reward, done, _", "torch.manual_seed(seed) env.seed(seed) np.random.seed(seed) env.action_space.seed(seed) random.seed(seed) state = env.reset() # Reset", "== 0: print(f'Episode {episode} Last Reward: {reward} Average Reward: {running_reward}')", "agent.replay_buffer.value_list, agent.replay_buffer.deeper_value_list) agent.replay_buffer.rewards_list = rewards_list agent.replay_buffer.advantage_list = advantage_list agent.replay_buffer.deeper_advantage_list =", "= gym.make('CartPole-v1') else: raise Exception('No valid environment selected') done =", "= rewards_list agent.replay_buffer.advantage_list = advantage_list agent.replay_buffer.deeper_advantage_list = deeper_advantage_list to_return =", "help=\"architecture of agent to run\", type=str, default='ddt') parser.add_argument(\"-e\", \"--episodes\", help=\"how", "0 returned_object = run_episode(None, agent_in=agent, ENV_NAME=ENV_NAME) reward += returned_object[0] running_reward_array.append(returned_object[0])", "range(5): bot_name = AGENT_TYPE + ENV_TYPE if USE_GPU: bot_name +=", "RuntimeError as e: print(e) return to_return return to_return def main(episodes,", "+= returned_object[0] running_reward_array.append(returned_object[0]) agent.replay_buffer.extend(returned_object[1]) if reward >= 499: agent.save('../models/'+str(episode)+'th') agent.end_episode(reward)", "action state, reward, done, _ = env.step(action) # env.render() #", "= env.step(action) # env.render() # Save reward agent.save_reward(reward) if done:", "if AGENT_TYPE == 'ddt': policy_agent = DDTAgent(bot_name=bot_name, input_dim=dim_in, output_dim=dim_out, rule_list=False,", "if episode % 50 == 0: print(f'Episode {episode} Last Reward:", "help=\"run on GPU?\", action='store_true') args = parser.parse_args() AGENT_TYPE = args.agent_type", "ENV_TYPE == 'cart': init_env = gym.make('CartPole-v1') dim_in = init_env.observation_space.shape[0] dim_out", "by <NAME> on 8/28/19 import gym import numpy as np", "help=\"number of leaves for DDT/DRL \", type=int, default=8) parser.add_argument(\"-n\", \"--num_hidden\",", "8/28/19 import gym import numpy as np import torch from", "try: q.put(to_return) except RuntimeError as e: print(e) return to_return return", "output_dim=dim_out, rule_list=False, num_rules=args.num_leaves) elif AGENT_TYPE == 'mlp': policy_agent = MLPAgent(input_dim=dim_in,", "Reward: {running_reward}') if episode % 500 == 0: agent.save('../models/'+str(episode)+'th') return", "on\", type=str, default='cart') parser.add_argument(\"-gpu\", help=\"run on GPU?\", action='store_true') args =", "state, reward, done, _ = env.step(action) # env.render() # Save", "environment selected') print(f\"Agent {AGENT_TYPE} on {ENV_TYPE} \") # mp.set_start_method('spawn') mp.set_sharing_strategy('file_system')", "done, _ = env.step(action) # env.render() # Save reward agent.save_reward(reward)", "__name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"-a\", \"--agent_type\", help=\"architecture of", "for 'prolo' only. use gpu? 
Default false if ENV_TYPE ==", "= np.sum(agent.replay_buffer.rewards_list) rewards_list, advantage_list, deeper_advantage_list = discount_reward(agent.replay_buffer.rewards_list, agent.replay_buffer.value_list, agent.replay_buffer.deeper_value_list) agent.replay_buffer.rewards_list", "episode % 500 == 0: agent.save('../models/'+str(episode)+'th') return running_reward_array if __name__", "_ = env.step(action) # env.render() # Save reward agent.save_reward(reward) if", "'ddt': policy_agent = DDTAgent(bot_name=bot_name, input_dim=dim_in, output_dim=dim_out, rule_list=False, num_rules=args.num_leaves) elif AGENT_TYPE", "running_reward_array = [] for episode in range(episodes): reward = 0", "reward_sum = np.sum(agent.replay_buffer.rewards_list) rewards_list, advantage_list, deeper_advantage_list = discount_reward(agent.replay_buffer.rewards_list, agent.replay_buffer.value_list, agent.replay_buffer.deeper_value_list)", "input_dim=dim_in, output_dim=dim_out, rule_list=False, num_rules=args.num_leaves) elif AGENT_TYPE == 'mlp': policy_agent =", "parser.add_argument(\"-e\", \"--episodes\", help=\"how many episodes\", type=int, default=2000) parser.add_argument(\"-l\", \"--num_leaves\", help=\"number", "init_env.action_space.n else: raise Exception('No valid environment selected') print(f\"Agent {AGENT_TYPE} on", "# 'cart' or 'lunar' Default 'cart' USE_GPU = args.gpu #", "as np import torch from interpretable_ddts.agents.ddt_agent import DDTAgent from interpretable_ddts.agents.mlp_agent", "agent.replay_buffer.rewards_list = rewards_list agent.replay_buffer.advantage_list = advantage_list agent.replay_buffer.deeper_advantage_list = deeper_advantage_list to_return", "float(min(100.0, len(running_reward_array))) if episode % 50 == 0: print(f'Episode {episode}", "Exception('No valid environment selected') done = False torch.manual_seed(seed) env.seed(seed) np.random.seed(seed)", "Created by <NAME> on 8/28/19 import gym import numpy as", "agent to run\", type=str, default='ddt') parser.add_argument(\"-e\", \"--episodes\", help=\"how many episodes\",", "returned_object[0] running_reward_array.append(returned_object[0]) agent.replay_buffer.extend(returned_object[1]) if reward >= 499: agent.save('../models/'+str(episode)+'th') agent.end_episode(reward) running_reward", "if ENV_NAME == 'lunar': env = gym.make('LunarLander-v2') elif ENV_NAME ==", "Default 'cart' USE_GPU = args.gpu # Applies for 'prolo' only.", "\"--env_type\", help=\"environment to run on\", type=str, default='cart') parser.add_argument(\"-gpu\", help=\"run on", "import argparse import copy import random def run_episode(q, agent_in, ENV_NAME,", "\"--num_hidden\", help=\"number of hidden layers for MLP \", type=int, default=0)", "leaves for DDT/DRL \", type=int, default=8) parser.add_argument(\"-n\", \"--num_hidden\", help=\"number of", "agent_in.duplicate() if ENV_NAME == 'lunar': env = gym.make('LunarLander-v2') elif ENV_NAME", "reward >= 499: agent.save('../models/'+str(episode)+'th') agent.end_episode(reward) running_reward = sum(running_reward_array[-100:]) / float(min(100.0,", "init_env.action_space.n elif ENV_TYPE == 'cart': init_env = gym.make('CartPole-v1') dim_in =", "is not None: try: q.put(to_return) except RuntimeError as e: print(e)", "gym.make('LunarLander-v2') dim_in = init_env.observation_space.shape[0] dim_out = init_env.action_space.n elif ENV_TYPE ==", "default=8) parser.add_argument(\"-n\", \"--num_hidden\", help=\"number of hidden layers for MLP \",", "on GPU?\", action='store_true') args = parser.parse_args() AGENT_TYPE 
= args.agent_type #", "None: try: q.put(to_return) except RuntimeError as e: print(e) return to_return", "\"--episodes\", help=\"how many episodes\", type=int, default=2000) parser.add_argument(\"-l\", \"--num_leaves\", help=\"number of", "{reward} Average Reward: {running_reward}') if episode % 500 == 0:", "default=2000) parser.add_argument(\"-l\", \"--num_leaves\", help=\"number of leaves for DDT/DRL \", type=int,", "agent.replay_buffer.advantage_list = advantage_list agent.replay_buffer.deeper_advantage_list = deeper_advantage_list to_return = [reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())]", "= agent_in.duplicate() if ENV_NAME == 'lunar': env = gym.make('LunarLander-v2') elif", "% 50 == 0: print(f'Episode {episode} Last Reward: {reward} Average", "bot_name += 'GPU' if AGENT_TYPE == 'ddt': policy_agent = DDTAgent(bot_name=bot_name,", "discount_reward(agent.replay_buffer.rewards_list, agent.replay_buffer.value_list, agent.replay_buffer.deeper_value_list) agent.replay_buffer.rewards_list = rewards_list agent.replay_buffer.advantage_list = advantage_list agent.replay_buffer.deeper_advantage_list", "# Reset environment and record the starting state while not", "default=0) parser.add_argument(\"-env\", \"--env_type\", help=\"environment to run on\", type=str, default='cart') parser.add_argument(\"-gpu\",", "num_rules=args.num_leaves) elif AGENT_TYPE == 'mlp': policy_agent = MLPAgent(input_dim=dim_in, bot_name=bot_name, output_dim=dim_out,", "AGENT_TYPE == 'ddt': policy_agent = DDTAgent(bot_name=bot_name, input_dim=dim_in, output_dim=dim_out, rule_list=False, num_rules=args.num_leaves)", "import numpy as np import torch from interpretable_ddts.agents.ddt_agent import DDTAgent", "ENV_TYPE == 'lunar': init_env = gym.make('LunarLander-v2') dim_in = init_env.observation_space.shape[0] dim_out", ">= 499: agent.save('../models/'+str(episode)+'th') agent.end_episode(reward) running_reward = sum(running_reward_array[-100:]) / float(min(100.0, len(running_reward_array)))", "on {ENV_TYPE} \") # mp.set_start_method('spawn') mp.set_sharing_strategy('file_system') for i in range(5):", "# Created by <NAME> on 8/28/19 import gym import numpy", "sum(running_reward_array[-100:]) / float(min(100.0, len(running_reward_array))) if episode % 50 == 0:", "'lunar' Default 'cart' USE_GPU = args.gpu # Applies for 'prolo'", "else: raise Exception('No valid environment selected') done = False torch.manual_seed(seed)", "= discount_reward(agent.replay_buffer.rewards_list, agent.replay_buffer.value_list, agent.replay_buffer.deeper_value_list) agent.replay_buffer.rewards_list = rewards_list agent.replay_buffer.advantage_list = advantage_list", "num_hidden=args.num_hidden) else: raise Exception('No valid network selected') reward_array = main(NUM_EPS,", "parser = argparse.ArgumentParser() parser.add_argument(\"-a\", \"--agent_type\", help=\"architecture of agent to run\",", "reward += returned_object[0] running_reward_array.append(returned_object[0]) agent.replay_buffer.extend(returned_object[1]) if reward >= 499: agent.save('../models/'+str(episode)+'th')", "mp.set_sharing_strategy('file_system') for i in range(5): bot_name = AGENT_TYPE + ENV_TYPE", "GPU?\", action='store_true') args = parser.parse_args() AGENT_TYPE = args.agent_type # 'ddt',", "of hidden layers for MLP \", type=int, default=0) parser.add_argument(\"-env\", \"--env_type\",", "Last Reward: {reward} Average Reward: {running_reward}') if episode % 500", "elif ENV_TYPE == 'cart': init_env = gym.make('CartPole-v1') dim_in = 
init_env.observation_space.shape[0]", "for episode in range(episodes): reward = 0 returned_object = run_episode(None,", "import gym import numpy as np import torch from interpretable_ddts.agents.ddt_agent", "\", type=int, default=0) parser.add_argument(\"-env\", \"--env_type\", help=\"environment to run on\", type=str,", "action = agent.get_action(state) # Step through environment using chosen action", "agent_in=agent, ENV_NAME=ENV_NAME) reward += returned_object[0] running_reward_array.append(returned_object[0]) agent.replay_buffer.extend(returned_object[1]) if reward >=", "import copy import random def run_episode(q, agent_in, ENV_NAME, seed=0): agent", "== 'mlp': policy_agent = MLPAgent(input_dim=dim_in, bot_name=bot_name, output_dim=dim_out, num_hidden=args.num_hidden) else: raise", "print(f'Episode {episode} Last Reward: {reward} Average Reward: {running_reward}') if episode", "as e: print(e) return to_return return to_return def main(episodes, agent,", "np.sum(agent.replay_buffer.rewards_list) rewards_list, advantage_list, deeper_advantage_list = discount_reward(agent.replay_buffer.rewards_list, agent.replay_buffer.value_list, agent.replay_buffer.deeper_value_list) agent.replay_buffer.rewards_list =", "run\", type=str, default='ddt') parser.add_argument(\"-e\", \"--episodes\", help=\"how many episodes\", type=int, default=2000)", "raise Exception('No valid environment selected') print(f\"Agent {AGENT_TYPE} on {ENV_TYPE} \")", "0: agent.save('../models/'+str(episode)+'th') return running_reward_array if __name__ == \"__main__\": parser =", "valid environment selected') print(f\"Agent {AGENT_TYPE} on {ENV_TYPE} \") # mp.set_start_method('spawn')", "discount_reward import torch.multiprocessing as mp import argparse import copy import", "episodes Default 1000 ENV_TYPE = args.env_type # 'cart' or 'lunar'", "gym.make('LunarLander-v2') elif ENV_NAME == 'cart': env = gym.make('CartPole-v1') else: raise", "'cart': env = gym.make('CartPole-v1') else: raise Exception('No valid environment selected')", "\") # mp.set_start_method('spawn') mp.set_sharing_strategy('file_system') for i in range(5): bot_name =", "== \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"-a\", \"--agent_type\", help=\"architecture of agent", "agent.save_reward(reward) if done: break reward_sum = np.sum(agent.replay_buffer.rewards_list) rewards_list, advantage_list, deeper_advantage_list", "np.random.seed(seed) env.action_space.seed(seed) random.seed(seed) state = env.reset() # Reset environment and", "rewards_list agent.replay_buffer.advantage_list = advantage_list agent.replay_buffer.deeper_advantage_list = deeper_advantage_list to_return = [reward_sum,", "range(episodes): reward = 0 returned_object = run_episode(None, agent_in=agent, ENV_NAME=ENV_NAME) reward", "main(episodes, agent, ENV_NAME): running_reward_array = [] for episode in range(episodes):", "= gym.make('CartPole-v1') dim_in = init_env.observation_space.shape[0] dim_out = init_env.action_space.n else: raise", "mp.set_start_method('spawn') mp.set_sharing_strategy('file_system') for i in range(5): bot_name = AGENT_TYPE +", "else: raise Exception('No valid environment selected') print(f\"Agent {AGENT_TYPE} on {ENV_TYPE}", "for i in range(5): bot_name = AGENT_TYPE + ENV_TYPE if", "if q is not None: try: q.put(to_return) except RuntimeError as", "'cart' USE_GPU = args.gpu # Applies for 'prolo' only. 
use", "False torch.manual_seed(seed) env.seed(seed) np.random.seed(seed) env.action_space.seed(seed) random.seed(seed) state = env.reset() #", "Exception('No valid environment selected') print(f\"Agent {AGENT_TYPE} on {ENV_TYPE} \") #", "'cart' or 'lunar' Default 'cart' USE_GPU = args.gpu # Applies", "parser.parse_args() AGENT_TYPE = args.agent_type # 'ddt', 'mlp' NUM_EPS = args.episodes", "or 'lunar' Default 'cart' USE_GPU = args.gpu # Applies for", "499: agent.save('../models/'+str(episode)+'th') agent.end_episode(reward) running_reward = sum(running_reward_array[-100:]) / float(min(100.0, len(running_reward_array))) if", "use gpu? Default false if ENV_TYPE == 'lunar': init_env =", "print(e) return to_return return to_return def main(episodes, agent, ENV_NAME): running_reward_array", "num episodes Default 1000 ENV_TYPE = args.env_type # 'cart' or", "run_episode(None, agent_in=agent, ENV_NAME=ENV_NAME) reward += returned_object[0] running_reward_array.append(returned_object[0]) agent.replay_buffer.extend(returned_object[1]) if reward", "= init_env.action_space.n elif ENV_TYPE == 'cart': init_env = gym.make('CartPole-v1') dim_in", "= [] for episode in range(episodes): reward = 0 returned_object", "for DDT/DRL \", type=int, default=8) parser.add_argument(\"-n\", \"--num_hidden\", help=\"number of hidden", "to_return def main(episodes, agent, ENV_NAME): running_reward_array = [] for episode", "done: action = agent.get_action(state) # Step through environment using chosen", "# env.render() # Save reward agent.save_reward(reward) if done: break reward_sum", "action='store_true') args = parser.parse_args() AGENT_TYPE = args.agent_type # 'ddt', 'mlp'", "args.agent_type # 'ddt', 'mlp' NUM_EPS = args.episodes # num episodes", "MLPAgent(input_dim=dim_in, bot_name=bot_name, output_dim=dim_out, num_hidden=args.num_hidden) else: raise Exception('No valid network selected')", "init_env = gym.make('CartPole-v1') dim_in = init_env.observation_space.shape[0] dim_out = init_env.action_space.n else:", "== 0: agent.save('../models/'+str(episode)+'th') return running_reward_array if __name__ == \"__main__\": parser", "np import torch from interpretable_ddts.agents.ddt_agent import DDTAgent from interpretable_ddts.agents.mlp_agent import", "q.put(to_return) except RuntimeError as e: print(e) return to_return return to_return", "== 'lunar': env = gym.make('LunarLander-v2') elif ENV_NAME == 'cart': env", "DDTAgent(bot_name=bot_name, input_dim=dim_in, output_dim=dim_out, rule_list=False, num_rules=args.num_leaves) elif AGENT_TYPE == 'mlp': policy_agent", "run on\", type=str, default='cart') parser.add_argument(\"-gpu\", help=\"run on GPU?\", action='store_true') args", "if USE_GPU: bot_name += 'GPU' if AGENT_TYPE == 'ddt': policy_agent", "many episodes\", type=int, default=2000) parser.add_argument(\"-l\", \"--num_leaves\", help=\"number of leaves for", "else: raise Exception('No valid network selected') reward_array = main(NUM_EPS, policy_agent,", "'mlp' NUM_EPS = args.episodes # num episodes Default 1000 ENV_TYPE", "parser.add_argument(\"-a\", \"--agent_type\", help=\"architecture of agent to run\", type=str, default='ddt') parser.add_argument(\"-e\",", "= 0 returned_object = run_episode(None, agent_in=agent, ENV_NAME=ENV_NAME) reward += returned_object[0]", "ENV_TYPE = args.env_type # 'cart' or 'lunar' Default 'cart' USE_GPU", "on 8/28/19 import gym import numpy as np import torch", "chosen action state, reward, done, _ = env.step(action) # env.render()", "agent.save('../models/'+str(episode)+'th') return 
running_reward_array if __name__ == \"__main__\": parser = argparse.ArgumentParser()", "i in range(5): bot_name = AGENT_TYPE + ENV_TYPE if USE_GPU:", "as mp import argparse import copy import random def run_episode(q,", "q is not None: try: q.put(to_return) except RuntimeError as e:", "= init_env.observation_space.shape[0] dim_out = init_env.action_space.n elif ENV_TYPE == 'cart': init_env", "ENV_NAME=ENV_NAME) reward += returned_object[0] running_reward_array.append(returned_object[0]) agent.replay_buffer.extend(returned_object[1]) if reward >= 499:", "Reset environment and record the starting state while not done:", "to run\", type=str, default='ddt') parser.add_argument(\"-e\", \"--episodes\", help=\"how many episodes\", type=int,", "dim_in = init_env.observation_space.shape[0] dim_out = init_env.action_space.n elif ENV_TYPE == 'cart':", "parser.add_argument(\"-env\", \"--env_type\", help=\"environment to run on\", type=str, default='cart') parser.add_argument(\"-gpu\", help=\"run", "[reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())] if q is not None: try: q.put(to_return) except", "and record the starting state while not done: action =", "{ENV_TYPE} \") # mp.set_start_method('spawn') mp.set_sharing_strategy('file_system') for i in range(5): bot_name", "'ddt', 'mlp' NUM_EPS = args.episodes # num episodes Default 1000", "return running_reward_array if __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"-a\",", "agent.end_episode(reward) running_reward = sum(running_reward_array[-100:]) / float(min(100.0, len(running_reward_array))) if episode %", "gpu? Default false if ENV_TYPE == 'lunar': init_env = gym.make('LunarLander-v2')", "type=int, default=0) parser.add_argument(\"-env\", \"--env_type\", help=\"environment to run on\", type=str, default='cart')", "agent.replay_buffer.deeper_advantage_list = deeper_advantage_list to_return = [reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())] if q is", "import MLPAgent from interpretable_ddts.opt_helpers.replay_buffer import discount_reward import torch.multiprocessing as mp", "'GPU' if AGENT_TYPE == 'ddt': policy_agent = DDTAgent(bot_name=bot_name, input_dim=dim_in, output_dim=dim_out,", "false if ENV_TYPE == 'lunar': init_env = gym.make('LunarLander-v2') dim_in =", "from interpretable_ddts.agents.mlp_agent import MLPAgent from interpretable_ddts.opt_helpers.replay_buffer import discount_reward import torch.multiprocessing", "running_reward_array if __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"-a\", \"--agent_type\",", "record the starting state while not done: action = agent.get_action(state)", "elif AGENT_TYPE == 'mlp': policy_agent = MLPAgent(input_dim=dim_in, bot_name=bot_name, output_dim=dim_out, num_hidden=args.num_hidden)", "interpretable_ddts.agents.ddt_agent import DDTAgent from interpretable_ddts.agents.mlp_agent import MLPAgent from interpretable_ddts.opt_helpers.replay_buffer import", "0: print(f'Episode {episode} Last Reward: {reward} Average Reward: {running_reward}') if", "torch from interpretable_ddts.agents.ddt_agent import DDTAgent from interpretable_ddts.agents.mlp_agent import MLPAgent from", "elif ENV_NAME == 'cart': env = gym.make('CartPole-v1') else: raise Exception('No", "gym.make('CartPole-v1') dim_in = init_env.observation_space.shape[0] dim_out = init_env.action_space.n else: raise Exception('No", "running_reward_array.append(returned_object[0]) agent.replay_buffer.extend(returned_object[1]) if reward >= 499: 
agent.save('../models/'+str(episode)+'th') agent.end_episode(reward) running_reward =", "env.seed(seed) np.random.seed(seed) env.action_space.seed(seed) random.seed(seed) state = env.reset() # Reset environment", "= sum(running_reward_array[-100:]) / float(min(100.0, len(running_reward_array))) if episode % 50 ==", "print(f\"Agent {AGENT_TYPE} on {ENV_TYPE} \") # mp.set_start_method('spawn') mp.set_sharing_strategy('file_system') for i", "== 'cart': env = gym.make('CartPole-v1') else: raise Exception('No valid environment", "if done: break reward_sum = np.sum(agent.replay_buffer.rewards_list) rewards_list, advantage_list, deeper_advantage_list =", "numpy as np import torch from interpretable_ddts.agents.ddt_agent import DDTAgent from", "# mp.set_start_method('spawn') mp.set_sharing_strategy('file_system') for i in range(5): bot_name = AGENT_TYPE", "{episode} Last Reward: {reward} Average Reward: {running_reward}') if episode %", "if ENV_TYPE == 'lunar': init_env = gym.make('LunarLander-v2') dim_in = init_env.observation_space.shape[0]", "copy.deepcopy(agent.replay_buffer.__getstate__())] if q is not None: try: q.put(to_return) except RuntimeError", "init_env.observation_space.shape[0] dim_out = init_env.action_space.n elif ENV_TYPE == 'cart': init_env =", "gym import numpy as np import torch from interpretable_ddts.agents.ddt_agent import", "torch.multiprocessing as mp import argparse import copy import random def", "== 'ddt': policy_agent = DDTAgent(bot_name=bot_name, input_dim=dim_in, output_dim=dim_out, rule_list=False, num_rules=args.num_leaves) elif", "to_return = [reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())] if q is not None: try:", "ENV_TYPE if USE_GPU: bot_name += 'GPU' if AGENT_TYPE == 'ddt':", "args.gpu # Applies for 'prolo' only. use gpu? 
Default false", "env.reset() # Reset environment and record the starting state while", "= [reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())] if q is not None: try: q.put(to_return)", "args = parser.parse_args() AGENT_TYPE = args.agent_type # 'ddt', 'mlp' NUM_EPS", "agent.get_action(state) # Step through environment using chosen action state, reward,", "env.render() # Save reward agent.save_reward(reward) if done: break reward_sum =", "# Save reward agent.save_reward(reward) if done: break reward_sum = np.sum(agent.replay_buffer.rewards_list)", "advantage_list agent.replay_buffer.deeper_advantage_list = deeper_advantage_list to_return = [reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())] if q", "using chosen action state, reward, done, _ = env.step(action) #", "episode % 50 == 0: print(f'Episode {episode} Last Reward: {reward}", "selected') print(f\"Agent {AGENT_TYPE} on {ENV_TYPE} \") # mp.set_start_method('spawn') mp.set_sharing_strategy('file_system') for", "reward agent.save_reward(reward) if done: break reward_sum = np.sum(agent.replay_buffer.rewards_list) rewards_list, advantage_list,", "len(running_reward_array))) if episode % 50 == 0: print(f'Episode {episode} Last", "dim_out = init_env.action_space.n else: raise Exception('No valid environment selected') print(f\"Agent", "argparse.ArgumentParser() parser.add_argument(\"-a\", \"--agent_type\", help=\"architecture of agent to run\", type=str, default='ddt')", "not None: try: q.put(to_return) except RuntimeError as e: print(e) return", "'lunar': env = gym.make('LunarLander-v2') elif ENV_NAME == 'cart': env =", "'mlp': policy_agent = MLPAgent(input_dim=dim_in, bot_name=bot_name, output_dim=dim_out, num_hidden=args.num_hidden) else: raise Exception('No", "deeper_advantage_list to_return = [reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())] if q is not None:", "= deeper_advantage_list to_return = [reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())] if q is not", "# num episodes Default 1000 ENV_TYPE = args.env_type # 'cart'", "parser.add_argument(\"-l\", \"--num_leaves\", help=\"number of leaves for DDT/DRL \", type=int, default=8)", "random def run_episode(q, agent_in, ENV_NAME, seed=0): agent = agent_in.duplicate() if", "agent_in, ENV_NAME, seed=0): agent = agent_in.duplicate() if ENV_NAME == 'lunar':", "Default false if ENV_TYPE == 'lunar': init_env = gym.make('LunarLander-v2') dim_in", "rule_list=False, num_rules=args.num_leaves) elif AGENT_TYPE == 'mlp': policy_agent = MLPAgent(input_dim=dim_in, bot_name=bot_name,", "import torch from interpretable_ddts.agents.ddt_agent import DDTAgent from interpretable_ddts.agents.mlp_agent import MLPAgent", "DDT/DRL \", type=int, default=8) parser.add_argument(\"-n\", \"--num_hidden\", help=\"number of hidden layers", "state while not done: action = agent.get_action(state) # Step through", "type=str, default='ddt') parser.add_argument(\"-e\", \"--episodes\", help=\"how many episodes\", type=int, default=2000) parser.add_argument(\"-l\",", "# 'ddt', 'mlp' NUM_EPS = args.episodes # num episodes Default", "+ ENV_TYPE if USE_GPU: bot_name += 'GPU' if AGENT_TYPE ==", "bot_name=bot_name, output_dim=dim_out, num_hidden=args.num_hidden) else: raise Exception('No valid network selected') reward_array", "dim_out = init_env.action_space.n elif ENV_TYPE == 'cart': init_env = gym.make('CartPole-v1')", "def main(episodes, agent, ENV_NAME): running_reward_array = [] for episode in", "\"--num_leaves\", help=\"number of leaves for DDT/DRL \", type=int, default=8) 
parser.add_argument(\"-n\",", "of agent to run\", type=str, default='ddt') parser.add_argument(\"-e\", \"--episodes\", help=\"how many", "help=\"environment to run on\", type=str, default='cart') parser.add_argument(\"-gpu\", help=\"run on GPU?\",", "= False torch.manual_seed(seed) env.seed(seed) np.random.seed(seed) env.action_space.seed(seed) random.seed(seed) state = env.reset()", "type=str, default='cart') parser.add_argument(\"-gpu\", help=\"run on GPU?\", action='store_true') args = parser.parse_args()", "agent.replay_buffer.deeper_value_list) agent.replay_buffer.rewards_list = rewards_list agent.replay_buffer.advantage_list = advantage_list agent.replay_buffer.deeper_advantage_list = deeper_advantage_list", "env.action_space.seed(seed) random.seed(seed) state = env.reset() # Reset environment and record", "init_env = gym.make('LunarLander-v2') dim_in = init_env.observation_space.shape[0] dim_out = init_env.action_space.n elif", "= advantage_list agent.replay_buffer.deeper_advantage_list = deeper_advantage_list to_return = [reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())] if", "\", type=int, default=8) parser.add_argument(\"-n\", \"--num_hidden\", help=\"number of hidden layers for", "rewards_list, advantage_list, deeper_advantage_list = discount_reward(agent.replay_buffer.rewards_list, agent.replay_buffer.value_list, agent.replay_buffer.deeper_value_list) agent.replay_buffer.rewards_list = rewards_list", "agent.replay_buffer.extend(returned_object[1]) if reward >= 499: agent.save('../models/'+str(episode)+'th') agent.end_episode(reward) running_reward = sum(running_reward_array[-100:])", "except RuntimeError as e: print(e) return to_return return to_return def", "break reward_sum = np.sum(agent.replay_buffer.rewards_list) rewards_list, advantage_list, deeper_advantage_list = discount_reward(agent.replay_buffer.rewards_list, agent.replay_buffer.value_list,", "env = gym.make('CartPole-v1') else: raise Exception('No valid environment selected') done", "done: break reward_sum = np.sum(agent.replay_buffer.rewards_list) rewards_list, advantage_list, deeper_advantage_list = discount_reward(agent.replay_buffer.rewards_list,", "returned_object = run_episode(None, agent_in=agent, ENV_NAME=ENV_NAME) reward += returned_object[0] running_reward_array.append(returned_object[0]) agent.replay_buffer.extend(returned_object[1])", "/ float(min(100.0, len(running_reward_array))) if episode % 50 == 0: print(f'Episode", "args.env_type # 'cart' or 'lunar' Default 'cart' USE_GPU = args.gpu", "= MLPAgent(input_dim=dim_in, bot_name=bot_name, output_dim=dim_out, num_hidden=args.num_hidden) else: raise Exception('No valid network", "mp import argparse import copy import random def run_episode(q, agent_in,", "args.episodes # num episodes Default 1000 ENV_TYPE = args.env_type #", "import discount_reward import torch.multiprocessing as mp import argparse import copy", "import DDTAgent from interpretable_ddts.agents.mlp_agent import MLPAgent from interpretable_ddts.opt_helpers.replay_buffer import discount_reward", "return to_return def main(episodes, agent, ENV_NAME): running_reward_array = [] for", "to run on\", type=str, default='cart') parser.add_argument(\"-gpu\", help=\"run on GPU?\", action='store_true')", "bot_name = AGENT_TYPE + ENV_TYPE if USE_GPU: bot_name += 'GPU'", "type=int, default=8) parser.add_argument(\"-n\", \"--num_hidden\", help=\"number of hidden layers for MLP", "deeper_advantage_list = discount_reward(agent.replay_buffer.rewards_list, agent.replay_buffer.value_list, 
agent.replay_buffer.deeper_value_list) agent.replay_buffer.rewards_list = rewards_list agent.replay_buffer.advantage_list =", "AGENT_TYPE + ENV_TYPE if USE_GPU: bot_name += 'GPU' if AGENT_TYPE", "output_dim=dim_out, num_hidden=args.num_hidden) else: raise Exception('No valid network selected') reward_array =", "1000 ENV_TYPE = args.env_type # 'cart' or 'lunar' Default 'cart'", "= env.reset() # Reset environment and record the starting state", "# Applies for 'prolo' only. use gpu? Default false if", "default='cart') parser.add_argument(\"-gpu\", help=\"run on GPU?\", action='store_true') args = parser.parse_args() AGENT_TYPE", "e: print(e) return to_return return to_return def main(episodes, agent, ENV_NAME):", "from interpretable_ddts.agents.ddt_agent import DDTAgent from interpretable_ddts.agents.mlp_agent import MLPAgent from interpretable_ddts.opt_helpers.replay_buffer", "not done: action = agent.get_action(state) # Step through environment using", "NUM_EPS = args.episodes # num episodes Default 1000 ENV_TYPE =", "type=int, default=2000) parser.add_argument(\"-l\", \"--num_leaves\", help=\"number of leaves for DDT/DRL \",", "= init_env.action_space.n else: raise Exception('No valid environment selected') print(f\"Agent {AGENT_TYPE}", "layers for MLP \", type=int, default=0) parser.add_argument(\"-env\", \"--env_type\", help=\"environment to", "random.seed(seed) state = env.reset() # Reset environment and record the", "parser.add_argument(\"-n\", \"--num_hidden\", help=\"number of hidden layers for MLP \", type=int,", "environment and record the starting state while not done: action", "of leaves for DDT/DRL \", type=int, default=8) parser.add_argument(\"-n\", \"--num_hidden\", help=\"number", "Average Reward: {running_reward}') if episode % 500 == 0: agent.save('../models/'+str(episode)+'th')", "init_env.observation_space.shape[0] dim_out = init_env.action_space.n else: raise Exception('No valid environment selected')", "interpretable_ddts.agents.mlp_agent import MLPAgent from interpretable_ddts.opt_helpers.replay_buffer import discount_reward import torch.multiprocessing as", "\"--agent_type\", help=\"architecture of agent to run\", type=str, default='ddt') parser.add_argument(\"-e\", \"--episodes\",", "episode in range(episodes): reward = 0 returned_object = run_episode(None, agent_in=agent,", "only. use gpu? Default false if ENV_TYPE == 'lunar': init_env", "MLPAgent from interpretable_ddts.opt_helpers.replay_buffer import discount_reward import torch.multiprocessing as mp import", "Save reward agent.save_reward(reward) if done: break reward_sum = np.sum(agent.replay_buffer.rewards_list) rewards_list,", "<NAME> on 8/28/19 import gym import numpy as np import", "running_reward = sum(running_reward_array[-100:]) / float(min(100.0, len(running_reward_array))) if episode % 50", "500 == 0: agent.save('../models/'+str(episode)+'th') return running_reward_array if __name__ == \"__main__\":", "= args.gpu # Applies for 'prolo' only. use gpu? 
import argparse
import copy
import random

import gym
import numpy as np
import torch
import torch.multiprocessing as mp

from interpretable_ddts.agents.ddt_agent import DDTAgent
from interpretable_ddts.agents.mlp_agent import MLPAgent
from interpretable_ddts.opt_helpers.replay_buffer import discount_reward


def run_episode(q, agent_in, ENV_NAME, seed=0):
    agent = agent_in.duplicate()
    if ENV_NAME == 'lunar':
        env = gym.make('LunarLander-v2')
    elif ENV_NAME == 'cart':
        env = gym.make('CartPole-v1')
    else:
        raise Exception('No valid environment selected')

    done = False
    # Seed every source of randomness so episodes are reproducible
    torch.manual_seed(seed)
    env.seed(seed)
    np.random.seed(seed)
    env.action_space.seed(seed)
    random.seed(seed)

    state = env.reset()  # Reset environment and record the starting state
    while not done:
        action = agent.get_action(state)
        # Step through environment using chosen action
        state, reward, done, _ = env.step(action)
        # env.render()
        # Save reward
        agent.save_reward(reward)
        if done:
            break

    reward_sum = np.sum(agent.replay_buffer.rewards_list)
    # Replace raw per-step rewards with discounted returns and advantages
    rewards_list, advantage_list, deeper_advantage_list = discount_reward(
        agent.replay_buffer.rewards_list,
        agent.replay_buffer.value_list,
        agent.replay_buffer.deeper_value_list)
    agent.replay_buffer.rewards_list = rewards_list
    agent.replay_buffer.advantage_list = advantage_list
    agent.replay_buffer.deeper_advantage_list = deeper_advantage_list

    to_return = [reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())]
    if q is not None:
        try:
            q.put(to_return)
        except RuntimeError:
            return to_return
    return to_return


def main(episodes, agent, ENV_NAME):
    running_reward_array = []
    for episode in range(episodes):
        reward = 0
        returned_object = run_episode(None, agent_in=agent, ENV_NAME=ENV_NAME)
        reward += returned_object[0]
        running_reward_array.append(returned_object[0])
        agent.replay_buffer.extend(returned_object[1])
        if reward >= 499:  # near-maximal CartPole-v1 return; checkpoint immediately
            agent.save('../models/' + str(episode) + 'th')
        agent.end_episode(reward)
        running_reward = sum(running_reward_array[-100:]) / float(min(100.0, len(running_reward_array)))
        if episode % 50 == 0:
            print(f'Episode {episode}  Last Reward: {reward}  Average Reward: {running_reward}')
        if episode % 500 == 0:
            agent.save('../models/' + str(episode) + 'th')
    return running_reward_array


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--agent_type", help="architecture of agent to run", type=str, default='ddt')
    parser.add_argument("-e", "--episodes", help="how many episodes", type=int, default=2000)
    parser.add_argument("-l", "--num_leaves", help="number of leaves for DDT/DRL", type=int, default=8)
    parser.add_argument("-n", "--num_hidden", help="number of hidden layers for MLP", type=int, default=0)
    parser.add_argument("-env", "--env_type", help="environment to run on", type=str, default='cart')
    parser.add_argument("-gpu", help="run on GPU?", action='store_true')
    args = parser.parse_args()

    AGENT_TYPE = args.agent_type  # 'ddt', 'mlp'
    NUM_EPS = args.episodes       # number of episodes (default 2000)
    ENV_TYPE = args.env_type      # 'cart' or 'lunar', default 'cart'
    USE_GPU = args.gpu            # use gpu? Default false

    if ENV_TYPE == 'lunar':
        init_env = gym.make('LunarLander-v2')
        dim_in = init_env.observation_space.shape[0]
        dim_out = init_env.action_space.n
    elif ENV_TYPE == 'cart':
        init_env = gym.make('CartPole-v1')
        dim_in = init_env.observation_space.shape[0]
        dim_out = init_env.action_space.n
    else:
        raise Exception('No valid environment selected')

    print(f"Agent {AGENT_TYPE} on {ENV_TYPE}")
    # mp.set_start_method('spawn')
    mp.set_sharing_strategy('file_system')
    for i in range(5):  # five independent training runs
        bot_name = AGENT_TYPE + ENV_TYPE
        if USE_GPU:
            bot_name += 'GPU'
        if AGENT_TYPE == 'ddt':
            policy_agent = DDTAgent(bot_name=bot_name,
                                    input_dim=dim_in,
                                    output_dim=dim_out,
                                    rule_list=False,
                                    num_rules=args.num_leaves)
        elif AGENT_TYPE == 'mlp':
            policy_agent = MLPAgent(input_dim=dim_in,
                                    bot_name=bot_name,
                                    output_dim=dim_out,
                                    num_hidden=args.num_hidden)
        else:
            raise Exception('No valid network selected')
        reward_array = main(NUM_EPS, policy_agent, ENV_TYPE)
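The post-processing step in run_episode depends entirely on discount_reward turning the buffer's raw per-step rewards into discounted returns and advantage estimates. That helper's body is not shown here, so the following is only a minimal sketch of the standard backward-pass computation it presumably performs; the gamma parameter, the deeper_values handling, and the name discount_reward_sketch are assumptions for illustration, not the library's actual API.

def discount_reward_sketch(rewards, values, deeper_values=None, gamma=0.99):
    # Hypothetical stand-in for interpretable_ddts' discount_reward:
    # walk the episode backwards accumulating the discounted return at
    # each step, then form advantages as (return - value estimate).
    returns = [0.0] * len(rewards)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    advantages = [r - v for r, v in zip(returns, values)]
    if deeper_values is not None and len(deeper_values) == len(returns):
        deeper_advantages = [r - v for r, v in zip(returns, deeper_values)]
    else:
        deeper_advantages = advantages
    return returns, advantages, deeper_advantages

Under those assumptions, discount_reward_sketch([1.0, 1.0, 1.0], [0.5, 0.5, 0.5]) yields returns of roughly [2.97, 1.99, 1.0] with the corresponding advantages, which is the shape of data the replay buffer is refilled with above.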
"""
TW10: Words by Prefix
Team: <NAME>, <NAME>
For: OMSBA 2061, Seattle University
Date: 11/3/2020
"""

# Question 2: collect the two-letter prefix of each word in a fixed list.
question_2 = ['able', 'ability', 'apple', 'tryst', 'trial', 'tremendous', 'tree']
my_list = []
for elem in question_2:
    prefix = elem[:2]
    my_list.append(prefix)
print(my_list)


def question_3(prefix_length, word):
    # Return the first prefix_length characters of every word in the list.
    my_list = []
    for key in word:
        prefix = key[:prefix_length]
        my_list.append(prefix)
    return my_list


def wordByPrefix(prefix_length, word):
    # Count how many words in the list share each alphabetic prefix
    # of the given length.
    my_list = []
    for key in word:
        prefix = key[:prefix_length]
        my_list.append(prefix)
    count = {}
    for letter in my_list:
        if letter.isalpha():
            if letter not in count:
                count[letter] = 0
            count[letter] += 1
    return count