content
stringlengths
7
1.05M
fixed_cases
stringlengths
1
1.28M
a = [] a.append(2) a.append(4) a.append(6) print(a[0:4]) #print(a)
a = [] a.append(2) a.append(4) a.append(6) print(a[0:4])
class Piece(object): ''' two type of players: black and white ''' def __init__(self, player): self.player = player class Grid(object): ''' each grid has one color: W - white / B - black a grid may point to a piece object ''' def __init__(self, color, piece = None): self.color = color self.piece = piece class Board(object): def __init__(self): self.checkerBoard = [[0 for _ in xrange(6)] for _ in xrange(6)] self._create() # OR (conf.BOARDSIZE/2) * (conf.BOARDSIZE/2 - 1) self.white_piece_Num = 6 self.black_piece_Num = 6 def _create(self): ''' initialize a checker board assign grid color and pieces ''' for i in xrange(6): for j in xrange(6): if not i%2 and not j%2: # both even self.checkerBoard[i][j] = Grid("W") elif i%2 and not j%2: # odd, even self.checkerBoard[i][j] = Grid("B") elif not i%2 and j%2: # even, odd self.checkerBoard[i][j] = Grid("B") else: # odd, odd self.checkerBoard[i][j] = Grid("W") if self.checkerBoard[i][j].color == "B": if j<2: self.checkerBoard[i][j].piece = Piece("white") elif 3<j<6: self.checkerBoard[i][j].piece = Piece("black") return def _direction(self, i, j, moveto): ''' calculate coordinates after a move on selected direction return type: tuple ''' return {'UpLeft': lambda: (i-1, j-1), 'UpRight': lambda: (i+1, j-1), 'DownLeft': lambda: (i-1, j+1), 'DownRight': lambda: (i+1, j+1), }.get(moveto)() def _valid_position(self, i, j): ''' check whether given position is valid in checkerBoard return type: bool ''' return (-1 < i < 6) and (-1 < j < 6) def move(self, start, end): ''' move piece from start to end (coordinate) ''' s_i, s_j = start[0], start[1] e_i, e_j = end[0], end[1] self.checkerBoard[e_i][e_j].piece = self.checkerBoard[s_i][s_j].piece self.checkerBoard[s_i][s_j].piece = None def remove(self, piece): ''' remove piece from board ''' i, j = piece[0], piece[1] if self.checkerBoard[i][j].piece.player == "white": self.white_piece_Num -= 1 else: self.black_piece_Num -= 1 self.checkerBoard[i][j].piece = None def check_jump(self, player): ''' return all capture moves for given player return type: list[list, list, ...] ''' jump_list = [] for i in xrange(6): for j in xrange(6): if self.checkerBoard[i][j].piece\ and self.checkerBoard[i][j].piece.player == player: if player == "white": adversary = "black" L_move1 = self._direction(i, j, 'DownLeft') R_move1 = self._direction(i, j, 'DownRight') L_move2 = self._direction(L_move1[0], L_move1[1], 'DownLeft') R_move2 = self._direction(R_move1[0], R_move1[1], 'DownRight') L1_i, L1_j, R1_i, R1_j = L_move1[0], L_move1[1], R_move1[0], R_move1[1] L2_i, L2_j, R2_i, R2_j = L_move2[0], L_move2[1], R_move2[0], R_move2[1] else: adversary = "white" L_move1 = self._direction(i, j, 'UpLeft') R_move1 = self._direction(i, j, 'UpRight') L_move2 = self._direction(L_move1[0], L_move1[1], 'UpLeft') R_move2 = self._direction(R_move1[0], R_move1[1], 'UpRight') L1_i, L1_j, R1_i, R1_j = L_move1[0], L_move1[1], R_move1[0], R_move1[1] L2_i, L2_j, R2_i, R2_j = L_move2[0], L_move2[1], R_move2[0], R_move2[1] if self._valid_position(L2_i, L2_j) or self._valid_position(R2_i, R2_j): if self._valid_position(L2_i, L2_j)\ and self.checkerBoard[L1_i][L1_j].piece\ and self.checkerBoard[L1_i][L1_j].piece.player == adversary\ and self.checkerBoard[L2_i][L2_j].piece is None: jump_list.append([i, j]) if self._valid_position(R2_i, R2_j)\ and self.checkerBoard[R1_i][R1_j].piece\ and self.checkerBoard[R1_i][R1_j].piece.player == adversary\ and self.checkerBoard[R2_i][R2_j].piece is None: jump_list.append([i, j]) return jump_list def valid_moves(self, piece, jump = 0): ''' return all valid moves for selected piece return type: list[list, list, ...] ''' i, j = piece cur_grid = self.checkerBoard[i][j] if cur_grid.piece == None: # if no piece in that grid return [] valid_moves = [] if jump: # if current piece is from another position after one capture move, # then check whether there are other capture moves # robot move if cur_grid.piece.player == "white": adversary = "black" L_move1 = self._direction(i, j, 'DownLeft') R_move1 = self._direction(i, j, 'DownRight') L_move2 = self._direction(L_move1[0], L_move1[1], 'DownLeft') R_move2 = self._direction(R_move1[0], R_move1[1], 'DownRight') L1_i, L1_j, R1_i, R1_j = L_move1[0], L_move1[1], R_move1[0], R_move1[1] L2_i, L2_j, R2_i, R2_j = L_move2[0], L_move2[1], R_move2[0], R_move2[1] # human move else: adversary = "white" L_move1 = self._direction(i, j, 'UpLeft') R_move1 = self._direction(i, j, 'UpRight') L_move2 = self._direction(L_move1[0], L_move1[1], 'UpLeft') R_move2 = self._direction(R_move1[0], R_move1[1], 'UpRight') L1_i, L1_j, R1_i, R1_j = L_move1[0], L_move1[1], R_move1[0], R_move1[1] L2_i, L2_j, R2_i, R2_j = L_move2[0], L_move2[1], R_move2[0], R_move2[1] # check left if (self._valid_position(L2_i, L2_j))\ and self.checkerBoard[L1_i][L1_j].piece\ and self.checkerBoard[L1_i][L1_j].piece.player == adversary\ and self.checkerBoard[L2_i][L2_j].piece is None: # empty valid_moves.append([L2_i, L2_j]) # check right if self._valid_position(R2_i, R2_j)\ and self.checkerBoard[R1_i][R1_j].piece\ and self.checkerBoard[R1_i][R1_j].piece.player == adversary\ and self.checkerBoard[R2_i][R2_j].piece is None: # empty valid_moves.append([R2_i, R2_j]) # if not after a capture move else: # computer move jump_exist = 0 # capture move flag player = cur_grid.piece.player if cur_grid.piece.player == "white": adversary = "black" L_move1 = self._direction(i, j, 'DownLeft') R_move1 = self._direction(i, j, 'DownRight') L_move2 = self._direction(L_move1[0], L_move1[1], 'DownLeft') R_move2 = self._direction(R_move1[0], R_move1[1], 'DownRight') L1_i, L1_j, R1_i, R1_j = L_move1[0], L_move1[1], R_move1[0], R_move1[1] L2_i, L2_j, R2_i, R2_j = L_move2[0], L_move2[1], R_move2[0], R_move2[1] else: adversary = "white" L_move1 = self._direction(i, j, 'UpLeft') R_move1 = self._direction(i, j, 'UpRight') L_move2 = self._direction(L_move1[0], L_move1[1], 'UpLeft') R_move2 = self._direction(R_move1[0], R_move1[1], 'UpRight') L1_i, L1_j, R1_i, R1_j = L_move1[0], L_move1[1], R_move1[0], R_move1[1] L2_i, L2_j, R2_i, R2_j = L_move2[0], L_move2[1], R_move2[0], R_move2[1] # if capture moves exist, return all capture moves if self._valid_position(L2_i, L2_j) or self._valid_position(R2_i, R2_j): if self._valid_position(L2_i, L2_j)\ and self.checkerBoard[L1_i][L1_j].piece\ and self.checkerBoard[L1_i][L1_j].piece.player == adversary\ and self.checkerBoard[L2_i][L2_j].piece is None: jump_exist = 1 valid_moves.append([L2_i, L2_j]) if self._valid_position(R2_i, R2_j)\ and self.checkerBoard[R1_i][R1_j].piece\ and self.checkerBoard[R1_i][R1_j].piece.player == adversary\ and self.checkerBoard[R2_i][R2_j].piece is None: jump_exist = 1 valid_moves.append([R2_i, R2_j]) if jump_exist == 0: # if there is no capture move if self._valid_position(L1_i, L1_j)\ and self.checkerBoard[L1_i][L1_j].piece == None: valid_moves.append([L1_i, L1_j]) if self._valid_position(R1_i, R1_j)\ and self.checkerBoard[R1_i][R1_j].piece == None: valid_moves.append([R1_i, R1_j]) return valid_moves
class Piece(object): """ two type of players: black and white """ def __init__(self, player): self.player = player class Grid(object): """ each grid has one color: W - white / B - black a grid may point to a piece object """ def __init__(self, color, piece=None): self.color = color self.piece = piece class Board(object): def __init__(self): self.checkerBoard = [[0 for _ in xrange(6)] for _ in xrange(6)] self._create() self.white_piece_Num = 6 self.black_piece_Num = 6 def _create(self): """ initialize a checker board assign grid color and pieces """ for i in xrange(6): for j in xrange(6): if not i % 2 and (not j % 2): self.checkerBoard[i][j] = grid('W') elif i % 2 and (not j % 2): self.checkerBoard[i][j] = grid('B') elif not i % 2 and j % 2: self.checkerBoard[i][j] = grid('B') else: self.checkerBoard[i][j] = grid('W') if self.checkerBoard[i][j].color == 'B': if j < 2: self.checkerBoard[i][j].piece = piece('white') elif 3 < j < 6: self.checkerBoard[i][j].piece = piece('black') return def _direction(self, i, j, moveto): """ calculate coordinates after a move on selected direction return type: tuple """ return {'UpLeft': lambda : (i - 1, j - 1), 'UpRight': lambda : (i + 1, j - 1), 'DownLeft': lambda : (i - 1, j + 1), 'DownRight': lambda : (i + 1, j + 1)}.get(moveto)() def _valid_position(self, i, j): """ check whether given position is valid in checkerBoard return type: bool """ return -1 < i < 6 and -1 < j < 6 def move(self, start, end): """ move piece from start to end (coordinate) """ (s_i, s_j) = (start[0], start[1]) (e_i, e_j) = (end[0], end[1]) self.checkerBoard[e_i][e_j].piece = self.checkerBoard[s_i][s_j].piece self.checkerBoard[s_i][s_j].piece = None def remove(self, piece): """ remove piece from board """ (i, j) = (piece[0], piece[1]) if self.checkerBoard[i][j].piece.player == 'white': self.white_piece_Num -= 1 else: self.black_piece_Num -= 1 self.checkerBoard[i][j].piece = None def check_jump(self, player): """ return all capture moves for given player return type: list[list, list, ...] """ jump_list = [] for i in xrange(6): for j in xrange(6): if self.checkerBoard[i][j].piece and self.checkerBoard[i][j].piece.player == player: if player == 'white': adversary = 'black' l_move1 = self._direction(i, j, 'DownLeft') r_move1 = self._direction(i, j, 'DownRight') l_move2 = self._direction(L_move1[0], L_move1[1], 'DownLeft') r_move2 = self._direction(R_move1[0], R_move1[1], 'DownRight') (l1_i, l1_j, r1_i, r1_j) = (L_move1[0], L_move1[1], R_move1[0], R_move1[1]) (l2_i, l2_j, r2_i, r2_j) = (L_move2[0], L_move2[1], R_move2[0], R_move2[1]) else: adversary = 'white' l_move1 = self._direction(i, j, 'UpLeft') r_move1 = self._direction(i, j, 'UpRight') l_move2 = self._direction(L_move1[0], L_move1[1], 'UpLeft') r_move2 = self._direction(R_move1[0], R_move1[1], 'UpRight') (l1_i, l1_j, r1_i, r1_j) = (L_move1[0], L_move1[1], R_move1[0], R_move1[1]) (l2_i, l2_j, r2_i, r2_j) = (L_move2[0], L_move2[1], R_move2[0], R_move2[1]) if self._valid_position(L2_i, L2_j) or self._valid_position(R2_i, R2_j): if self._valid_position(L2_i, L2_j) and self.checkerBoard[L1_i][L1_j].piece and (self.checkerBoard[L1_i][L1_j].piece.player == adversary) and (self.checkerBoard[L2_i][L2_j].piece is None): jump_list.append([i, j]) if self._valid_position(R2_i, R2_j) and self.checkerBoard[R1_i][R1_j].piece and (self.checkerBoard[R1_i][R1_j].piece.player == adversary) and (self.checkerBoard[R2_i][R2_j].piece is None): jump_list.append([i, j]) return jump_list def valid_moves(self, piece, jump=0): """ return all valid moves for selected piece return type: list[list, list, ...] """ (i, j) = piece cur_grid = self.checkerBoard[i][j] if cur_grid.piece == None: return [] valid_moves = [] if jump: if cur_grid.piece.player == 'white': adversary = 'black' l_move1 = self._direction(i, j, 'DownLeft') r_move1 = self._direction(i, j, 'DownRight') l_move2 = self._direction(L_move1[0], L_move1[1], 'DownLeft') r_move2 = self._direction(R_move1[0], R_move1[1], 'DownRight') (l1_i, l1_j, r1_i, r1_j) = (L_move1[0], L_move1[1], R_move1[0], R_move1[1]) (l2_i, l2_j, r2_i, r2_j) = (L_move2[0], L_move2[1], R_move2[0], R_move2[1]) else: adversary = 'white' l_move1 = self._direction(i, j, 'UpLeft') r_move1 = self._direction(i, j, 'UpRight') l_move2 = self._direction(L_move1[0], L_move1[1], 'UpLeft') r_move2 = self._direction(R_move1[0], R_move1[1], 'UpRight') (l1_i, l1_j, r1_i, r1_j) = (L_move1[0], L_move1[1], R_move1[0], R_move1[1]) (l2_i, l2_j, r2_i, r2_j) = (L_move2[0], L_move2[1], R_move2[0], R_move2[1]) if self._valid_position(L2_i, L2_j) and self.checkerBoard[L1_i][L1_j].piece and (self.checkerBoard[L1_i][L1_j].piece.player == adversary) and (self.checkerBoard[L2_i][L2_j].piece is None): valid_moves.append([L2_i, L2_j]) if self._valid_position(R2_i, R2_j) and self.checkerBoard[R1_i][R1_j].piece and (self.checkerBoard[R1_i][R1_j].piece.player == adversary) and (self.checkerBoard[R2_i][R2_j].piece is None): valid_moves.append([R2_i, R2_j]) else: jump_exist = 0 player = cur_grid.piece.player if cur_grid.piece.player == 'white': adversary = 'black' l_move1 = self._direction(i, j, 'DownLeft') r_move1 = self._direction(i, j, 'DownRight') l_move2 = self._direction(L_move1[0], L_move1[1], 'DownLeft') r_move2 = self._direction(R_move1[0], R_move1[1], 'DownRight') (l1_i, l1_j, r1_i, r1_j) = (L_move1[0], L_move1[1], R_move1[0], R_move1[1]) (l2_i, l2_j, r2_i, r2_j) = (L_move2[0], L_move2[1], R_move2[0], R_move2[1]) else: adversary = 'white' l_move1 = self._direction(i, j, 'UpLeft') r_move1 = self._direction(i, j, 'UpRight') l_move2 = self._direction(L_move1[0], L_move1[1], 'UpLeft') r_move2 = self._direction(R_move1[0], R_move1[1], 'UpRight') (l1_i, l1_j, r1_i, r1_j) = (L_move1[0], L_move1[1], R_move1[0], R_move1[1]) (l2_i, l2_j, r2_i, r2_j) = (L_move2[0], L_move2[1], R_move2[0], R_move2[1]) if self._valid_position(L2_i, L2_j) or self._valid_position(R2_i, R2_j): if self._valid_position(L2_i, L2_j) and self.checkerBoard[L1_i][L1_j].piece and (self.checkerBoard[L1_i][L1_j].piece.player == adversary) and (self.checkerBoard[L2_i][L2_j].piece is None): jump_exist = 1 valid_moves.append([L2_i, L2_j]) if self._valid_position(R2_i, R2_j) and self.checkerBoard[R1_i][R1_j].piece and (self.checkerBoard[R1_i][R1_j].piece.player == adversary) and (self.checkerBoard[R2_i][R2_j].piece is None): jump_exist = 1 valid_moves.append([R2_i, R2_j]) if jump_exist == 0: if self._valid_position(L1_i, L1_j) and self.checkerBoard[L1_i][L1_j].piece == None: valid_moves.append([L1_i, L1_j]) if self._valid_position(R1_i, R1_j) and self.checkerBoard[R1_i][R1_j].piece == None: valid_moves.append([R1_i, R1_j]) return valid_moves
#List items are indexed and you can access them by referring to the index number: #Example #Print the second item of the list: thislist = ["apple", "banana", "cherry"] print(thislist[1])
thislist = ['apple', 'banana', 'cherry'] print(thislist[1])
# # PySNMP MIB module EMPIRE-APACHEMOD (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/EMPIRE-APACHEMOD # Produced by pysmi-0.3.4 at Mon Apr 29 18:48:20 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") Counter32, enterprises, ModuleIdentity, ObjectIdentity, TimeTicks, iso, Bits, NotificationType, MibIdentifier, NotificationType, IpAddress, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Integer32, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "enterprises", "ModuleIdentity", "ObjectIdentity", "TimeTicks", "iso", "Bits", "NotificationType", "MibIdentifier", "NotificationType", "IpAddress", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Integer32", "Unsigned32") TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString") empire = MibIdentifier((1, 3, 6, 1, 4, 1, 546)) applications = MibIdentifier((1, 3, 6, 1, 4, 1, 546, 16)) apacheSrv = MibIdentifier((1, 3, 6, 1, 4, 1, 546, 16, 3)) apacheModVersion = MibScalar((1, 3, 6, 1, 4, 1, 546, 16, 3, 1), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheModVersion.setStatus('mandatory') apacheModMode = MibScalar((1, 3, 6, 1, 4, 1, 546, 16, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("fullMode", 1), ("restrictedMode", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheModMode.setStatus('mandatory') apacheConfigTable = MibTable((1, 3, 6, 1, 4, 1, 546, 16, 3, 10), ) if mibBuilder.loadTexts: apacheConfigTable.setStatus('mandatory') apacheConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1), ).setIndexNames((0, "EMPIRE-APACHEMOD", "apacheConfigPort")) if mibBuilder.loadTexts: apacheConfigEntry.setStatus('mandatory') apacheConfigPort = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigPort.setStatus('mandatory') apacheConfigVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 2), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigVersion.setStatus('mandatory') apacheConfigPID = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigPID.setStatus('mandatory') apacheConfigRunMode = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 4), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigRunMode.setStatus('mandatory') apacheConfigUser = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 5), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigUser.setStatus('mandatory') apacheConfigGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 6), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigGroup.setStatus('mandatory') apacheConfigHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 7), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigHostname.setStatus('mandatory') apacheConfigStartProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 8), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigStartProcs.setStatus('mandatory') apacheConfigMinIdleProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 9), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigMinIdleProcs.setStatus('mandatory') apacheConfigMaxIdleProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 10), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigMaxIdleProcs.setStatus('mandatory') apacheConfigMaxProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 11), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigMaxProcs.setStatus('mandatory') apacheConfigRequestsMaxPerChild = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 12), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigRequestsMaxPerChild.setStatus('mandatory') apacheConfigRequestsKeepAlive = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 13), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigRequestsKeepAlive.setStatus('mandatory') apacheConfigRequestsMaxPerConn = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 14), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigRequestsMaxPerConn.setStatus('mandatory') apacheConfigThreadsPerChild = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 15), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigThreadsPerChild.setStatus('mandatory') apacheConfigConnectionTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 16), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigConnectionTimeout.setStatus('mandatory') apacheConfigKeepAliveTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 17), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigKeepAliveTimeout.setStatus('mandatory') apacheConfigServerRoot = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 18), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigServerRoot.setStatus('mandatory') apacheConfigConfigFile = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 19), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigConfigFile.setStatus('mandatory') apacheConfigPIDFile = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 20), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigPIDFile.setStatus('mandatory') apacheConfigScoreboardFile = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 21), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigScoreboardFile.setStatus('mandatory') apacheConfigDocumentRoot = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 22), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigDocumentRoot.setStatus('mandatory') apacheConfigAccessLogFile = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 23), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigAccessLogFile.setStatus('mandatory') apacheConfigErrorLogFile = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 24), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigErrorLogFile.setStatus('mandatory') apacheConfigScriptLogFile = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 25), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheConfigScriptLogFile.setStatus('mandatory') apachePerformance = MibIdentifier((1, 3, 6, 1, 4, 1, 546, 16, 3, 11)) apacheFootprintTable = MibTable((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1), ) if mibBuilder.loadTexts: apacheFootprintTable.setStatus('mandatory') apacheFootprintEntry = MibTableRow((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1), ).setIndexNames((0, "EMPIRE-APACHEMOD", "apacheFootprintPort")) if mibBuilder.loadTexts: apacheFootprintEntry.setStatus('mandatory') apacheFootprintPort = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintPort.setStatus('mandatory') apacheFootprintCPUTime = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintCPUTime.setStatus('mandatory') apacheFootprintPercentCPU = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintPercentCPU.setStatus('mandatory') apacheFootprintTotalMEMSize = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 4), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintTotalMEMSize.setStatus('mandatory') apacheFootprintTotalRSS = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 5), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintTotalRSS.setStatus('mandatory') apacheFootprintPercentMEM = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintPercentMEM.setStatus('mandatory') apacheFootprintNumThreads = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 7), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintNumThreads.setStatus('mandatory') apacheFootprintInBlks = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 8), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintInBlks.setStatus('mandatory') apacheFootprintOutBlks = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 9), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintOutBlks.setStatus('mandatory') apacheFootprintMsgsSent = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 10), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintMsgsSent.setStatus('mandatory') apacheFootprintMsgsRecv = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 11), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintMsgsRecv.setStatus('mandatory') apacheFootprintSysCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 12), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintSysCalls.setStatus('mandatory') apacheFootprintMinorPgFlts = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 13), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintMinorPgFlts.setStatus('mandatory') apacheFootprintMajorPgFlts = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 14), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintMajorPgFlts.setStatus('mandatory') apacheFootprintNumSwaps = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 15), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintNumSwaps.setStatus('mandatory') apacheFootprintVolCtx = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 16), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintVolCtx.setStatus('mandatory') apacheFootprintInvolCtx = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 17), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintInvolCtx.setStatus('mandatory') apacheFootprintTotalLogSize = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 18), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintTotalLogSize.setStatus('mandatory') apacheFootprintDocSize = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 19), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintDocSize.setStatus('mandatory') apacheFootprintTotalDiskSize = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 20), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheFootprintTotalDiskSize.setStatus('mandatory') apacheServerPerfTable = MibTable((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2), ) if mibBuilder.loadTexts: apacheServerPerfTable.setStatus('mandatory') apacheServerPerfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1), ).setIndexNames((0, "EMPIRE-APACHEMOD", "apacheServerPerfPort")) if mibBuilder.loadTexts: apacheServerPerfEntry.setStatus('mandatory') apacheServerPerfPort = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfPort.setStatus('mandatory') apacheServerPerfUptime = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 2), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfUptime.setStatus('mandatory') apacheServerPerfTotalAccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 3), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfTotalAccesses.setStatus('mandatory') apacheServerPerfTotalTraffic = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfTotalTraffic.setStatus('mandatory') apacheServerPerfCurrentUsers = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 5), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfCurrentUsers.setStatus('mandatory') apacheServerPerfCurrentIdleProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 6), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfCurrentIdleProcs.setStatus('mandatory') apacheServerPerfCurrentStartupProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 7), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfCurrentStartupProcs.setStatus('mandatory') apacheServerPerfCurrentReadProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 8), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfCurrentReadProcs.setStatus('mandatory') apacheServerPerfCurrentReplyProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 9), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfCurrentReplyProcs.setStatus('mandatory') apacheServerPerfCurrentKeepAliveProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 10), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfCurrentKeepAliveProcs.setStatus('mandatory') apacheServerPerfCurrentDNSProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 11), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfCurrentDNSProcs.setStatus('mandatory') apacheServerPerfCurrentLoggingProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 12), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfCurrentLoggingProcs.setStatus('mandatory') apacheServerPerfCurrentFinishingProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 13), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfCurrentFinishingProcs.setStatus('mandatory') apacheServerPerfCurrentTotalProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 14), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfCurrentTotalProcs.setStatus('mandatory') apacheServerPerfCurrentBusyProcs = MibTableColumn((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 15), Gauge32()).setMaxAccess("readonly") if mibBuilder.loadTexts: apacheServerPerfCurrentBusyProcs.setStatus('mandatory') mibBuilder.exportSymbols("EMPIRE-APACHEMOD", apacheServerPerfUptime=apacheServerPerfUptime, apacheConfigPort=apacheConfigPort, apacheConfigRequestsKeepAlive=apacheConfigRequestsKeepAlive, apacheFootprintEntry=apacheFootprintEntry, apacheFootprintVolCtx=apacheFootprintVolCtx, apacheFootprintPort=apacheFootprintPort, apacheFootprintSysCalls=apacheFootprintSysCalls, apacheFootprintInvolCtx=apacheFootprintInvolCtx, apacheFootprintTotalRSS=apacheFootprintTotalRSS, apacheServerPerfCurrentFinishingProcs=apacheServerPerfCurrentFinishingProcs, apacheConfigVersion=apacheConfigVersion, apacheConfigRequestsMaxPerConn=apacheConfigRequestsMaxPerConn, apacheConfigAccessLogFile=apacheConfigAccessLogFile, apacheConfigConfigFile=apacheConfigConfigFile, apacheConfigDocumentRoot=apacheConfigDocumentRoot, apacheServerPerfCurrentDNSProcs=apacheServerPerfCurrentDNSProcs, apacheFootprintNumSwaps=apacheFootprintNumSwaps, apacheFootprintPercentCPU=apacheFootprintPercentCPU, apacheFootprintTotalLogSize=apacheFootprintTotalLogSize, apacheServerPerfPort=apacheServerPerfPort, apacheFootprintInBlks=apacheFootprintInBlks, apacheConfigScriptLogFile=apacheConfigScriptLogFile, apachePerformance=apachePerformance, apacheConfigThreadsPerChild=apacheConfigThreadsPerChild, apacheFootprintTotalDiskSize=apacheFootprintTotalDiskSize, apacheSrv=apacheSrv, apacheConfigRunMode=apacheConfigRunMode, apacheServerPerfCurrentReplyProcs=apacheServerPerfCurrentReplyProcs, apacheServerPerfEntry=apacheServerPerfEntry, apacheServerPerfCurrentBusyProcs=apacheServerPerfCurrentBusyProcs, apacheConfigUser=apacheConfigUser, apacheFootprintTotalMEMSize=apacheFootprintTotalMEMSize, apacheServerPerfTotalAccesses=apacheServerPerfTotalAccesses, apacheServerPerfTotalTraffic=apacheServerPerfTotalTraffic, apacheConfigRequestsMaxPerChild=apacheConfigRequestsMaxPerChild, empire=empire, apacheFootprintCPUTime=apacheFootprintCPUTime, apacheModVersion=apacheModVersion, apacheConfigEntry=apacheConfigEntry, apacheConfigStartProcs=apacheConfigStartProcs, apacheConfigHostname=apacheConfigHostname, apacheConfigErrorLogFile=apacheConfigErrorLogFile, apacheFootprintNumThreads=apacheFootprintNumThreads, apacheServerPerfCurrentReadProcs=apacheServerPerfCurrentReadProcs, apacheServerPerfCurrentIdleProcs=apacheServerPerfCurrentIdleProcs, apacheFootprintTable=apacheFootprintTable, apacheConfigConnectionTimeout=apacheConfigConnectionTimeout, apacheFootprintMsgsSent=apacheFootprintMsgsSent, apacheConfigTable=apacheConfigTable, apacheModMode=apacheModMode, apacheConfigGroup=apacheConfigGroup, applications=applications, apacheServerPerfCurrentLoggingProcs=apacheServerPerfCurrentLoggingProcs, apacheConfigMaxIdleProcs=apacheConfigMaxIdleProcs, apacheConfigMaxProcs=apacheConfigMaxProcs, apacheFootprintPercentMEM=apacheFootprintPercentMEM, apacheServerPerfTable=apacheServerPerfTable, apacheConfigPID=apacheConfigPID, apacheConfigServerRoot=apacheConfigServerRoot, apacheServerPerfCurrentKeepAliveProcs=apacheServerPerfCurrentKeepAliveProcs, apacheFootprintMinorPgFlts=apacheFootprintMinorPgFlts, apacheFootprintDocSize=apacheFootprintDocSize, apacheServerPerfCurrentStartupProcs=apacheServerPerfCurrentStartupProcs, apacheConfigMinIdleProcs=apacheConfigMinIdleProcs, apacheFootprintMsgsRecv=apacheFootprintMsgsRecv, apacheServerPerfCurrentUsers=apacheServerPerfCurrentUsers, apacheServerPerfCurrentTotalProcs=apacheServerPerfCurrentTotalProcs, apacheConfigKeepAliveTimeout=apacheConfigKeepAliveTimeout, apacheConfigScoreboardFile=apacheConfigScoreboardFile, apacheFootprintMajorPgFlts=apacheFootprintMajorPgFlts, apacheFootprintOutBlks=apacheFootprintOutBlks, apacheConfigPIDFile=apacheConfigPIDFile)
(octet_string, object_identifier, integer) = mibBuilder.importSymbols('ASN1', 'OctetString', 'ObjectIdentifier', 'Integer') (named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues') (constraints_intersection, constraints_union, value_size_constraint, value_range_constraint, single_value_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ConstraintsIntersection', 'ConstraintsUnion', 'ValueSizeConstraint', 'ValueRangeConstraint', 'SingleValueConstraint') (module_compliance, notification_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'NotificationGroup') (counter32, enterprises, module_identity, object_identity, time_ticks, iso, bits, notification_type, mib_identifier, notification_type, ip_address, counter64, mib_scalar, mib_table, mib_table_row, mib_table_column, gauge32, integer32, unsigned32) = mibBuilder.importSymbols('SNMPv2-SMI', 'Counter32', 'enterprises', 'ModuleIdentity', 'ObjectIdentity', 'TimeTicks', 'iso', 'Bits', 'NotificationType', 'MibIdentifier', 'NotificationType', 'IpAddress', 'Counter64', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'Gauge32', 'Integer32', 'Unsigned32') (textual_convention, display_string) = mibBuilder.importSymbols('SNMPv2-TC', 'TextualConvention', 'DisplayString') empire = mib_identifier((1, 3, 6, 1, 4, 1, 546)) applications = mib_identifier((1, 3, 6, 1, 4, 1, 546, 16)) apache_srv = mib_identifier((1, 3, 6, 1, 4, 1, 546, 16, 3)) apache_mod_version = mib_scalar((1, 3, 6, 1, 4, 1, 546, 16, 3, 1), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheModVersion.setStatus('mandatory') apache_mod_mode = mib_scalar((1, 3, 6, 1, 4, 1, 546, 16, 3, 2), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2))).clone(namedValues=named_values(('fullMode', 1), ('restrictedMode', 2)))).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheModMode.setStatus('mandatory') apache_config_table = mib_table((1, 3, 6, 1, 4, 1, 546, 16, 3, 10)) if mibBuilder.loadTexts: apacheConfigTable.setStatus('mandatory') apache_config_entry = mib_table_row((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1)).setIndexNames((0, 'EMPIRE-APACHEMOD', 'apacheConfigPort')) if mibBuilder.loadTexts: apacheConfigEntry.setStatus('mandatory') apache_config_port = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 1), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigPort.setStatus('mandatory') apache_config_version = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 2), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigVersion.setStatus('mandatory') apache_config_pid = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 3), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigPID.setStatus('mandatory') apache_config_run_mode = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 4), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigRunMode.setStatus('mandatory') apache_config_user = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 5), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigUser.setStatus('mandatory') apache_config_group = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 6), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigGroup.setStatus('mandatory') apache_config_hostname = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 7), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigHostname.setStatus('mandatory') apache_config_start_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 8), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigStartProcs.setStatus('mandatory') apache_config_min_idle_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 9), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigMinIdleProcs.setStatus('mandatory') apache_config_max_idle_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 10), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigMaxIdleProcs.setStatus('mandatory') apache_config_max_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 11), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigMaxProcs.setStatus('mandatory') apache_config_requests_max_per_child = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 12), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigRequestsMaxPerChild.setStatus('mandatory') apache_config_requests_keep_alive = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 13), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigRequestsKeepAlive.setStatus('mandatory') apache_config_requests_max_per_conn = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 14), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigRequestsMaxPerConn.setStatus('mandatory') apache_config_threads_per_child = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 15), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigThreadsPerChild.setStatus('mandatory') apache_config_connection_timeout = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 16), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigConnectionTimeout.setStatus('mandatory') apache_config_keep_alive_timeout = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 17), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigKeepAliveTimeout.setStatus('mandatory') apache_config_server_root = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 18), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigServerRoot.setStatus('mandatory') apache_config_config_file = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 19), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigConfigFile.setStatus('mandatory') apache_config_pid_file = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 20), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigPIDFile.setStatus('mandatory') apache_config_scoreboard_file = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 21), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigScoreboardFile.setStatus('mandatory') apache_config_document_root = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 22), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigDocumentRoot.setStatus('mandatory') apache_config_access_log_file = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 23), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigAccessLogFile.setStatus('mandatory') apache_config_error_log_file = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 24), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigErrorLogFile.setStatus('mandatory') apache_config_script_log_file = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 10, 1, 25), display_string()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheConfigScriptLogFile.setStatus('mandatory') apache_performance = mib_identifier((1, 3, 6, 1, 4, 1, 546, 16, 3, 11)) apache_footprint_table = mib_table((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1)) if mibBuilder.loadTexts: apacheFootprintTable.setStatus('mandatory') apache_footprint_entry = mib_table_row((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1)).setIndexNames((0, 'EMPIRE-APACHEMOD', 'apacheFootprintPort')) if mibBuilder.loadTexts: apacheFootprintEntry.setStatus('mandatory') apache_footprint_port = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 1), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintPort.setStatus('mandatory') apache_footprint_cpu_time = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 2), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintCPUTime.setStatus('mandatory') apache_footprint_percent_cpu = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 3), integer32().subtype(subtypeSpec=value_range_constraint(0, 100))).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintPercentCPU.setStatus('mandatory') apache_footprint_total_mem_size = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 4), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintTotalMEMSize.setStatus('mandatory') apache_footprint_total_rss = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 5), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintTotalRSS.setStatus('mandatory') apache_footprint_percent_mem = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 6), integer32().subtype(subtypeSpec=value_range_constraint(0, 100))).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintPercentMEM.setStatus('mandatory') apache_footprint_num_threads = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 7), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintNumThreads.setStatus('mandatory') apache_footprint_in_blks = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 8), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintInBlks.setStatus('mandatory') apache_footprint_out_blks = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 9), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintOutBlks.setStatus('mandatory') apache_footprint_msgs_sent = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 10), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintMsgsSent.setStatus('mandatory') apache_footprint_msgs_recv = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 11), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintMsgsRecv.setStatus('mandatory') apache_footprint_sys_calls = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 12), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintSysCalls.setStatus('mandatory') apache_footprint_minor_pg_flts = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 13), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintMinorPgFlts.setStatus('mandatory') apache_footprint_major_pg_flts = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 14), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintMajorPgFlts.setStatus('mandatory') apache_footprint_num_swaps = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 15), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintNumSwaps.setStatus('mandatory') apache_footprint_vol_ctx = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 16), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintVolCtx.setStatus('mandatory') apache_footprint_invol_ctx = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 17), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintInvolCtx.setStatus('mandatory') apache_footprint_total_log_size = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 18), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintTotalLogSize.setStatus('mandatory') apache_footprint_doc_size = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 19), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintDocSize.setStatus('mandatory') apache_footprint_total_disk_size = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 1, 1, 20), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheFootprintTotalDiskSize.setStatus('mandatory') apache_server_perf_table = mib_table((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2)) if mibBuilder.loadTexts: apacheServerPerfTable.setStatus('mandatory') apache_server_perf_entry = mib_table_row((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1)).setIndexNames((0, 'EMPIRE-APACHEMOD', 'apacheServerPerfPort')) if mibBuilder.loadTexts: apacheServerPerfEntry.setStatus('mandatory') apache_server_perf_port = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 1), integer32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfPort.setStatus('mandatory') apache_server_perf_uptime = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 2), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfUptime.setStatus('mandatory') apache_server_perf_total_accesses = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 3), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfTotalAccesses.setStatus('mandatory') apache_server_perf_total_traffic = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 4), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfTotalTraffic.setStatus('mandatory') apache_server_perf_current_users = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 5), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfCurrentUsers.setStatus('mandatory') apache_server_perf_current_idle_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 6), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfCurrentIdleProcs.setStatus('mandatory') apache_server_perf_current_startup_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 7), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfCurrentStartupProcs.setStatus('mandatory') apache_server_perf_current_read_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 8), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfCurrentReadProcs.setStatus('mandatory') apache_server_perf_current_reply_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 9), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfCurrentReplyProcs.setStatus('mandatory') apache_server_perf_current_keep_alive_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 10), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfCurrentKeepAliveProcs.setStatus('mandatory') apache_server_perf_current_dns_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 11), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfCurrentDNSProcs.setStatus('mandatory') apache_server_perf_current_logging_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 12), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfCurrentLoggingProcs.setStatus('mandatory') apache_server_perf_current_finishing_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 13), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfCurrentFinishingProcs.setStatus('mandatory') apache_server_perf_current_total_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 14), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfCurrentTotalProcs.setStatus('mandatory') apache_server_perf_current_busy_procs = mib_table_column((1, 3, 6, 1, 4, 1, 546, 16, 3, 11, 2, 1, 15), gauge32()).setMaxAccess('readonly') if mibBuilder.loadTexts: apacheServerPerfCurrentBusyProcs.setStatus('mandatory') mibBuilder.exportSymbols('EMPIRE-APACHEMOD', apacheServerPerfUptime=apacheServerPerfUptime, apacheConfigPort=apacheConfigPort, apacheConfigRequestsKeepAlive=apacheConfigRequestsKeepAlive, apacheFootprintEntry=apacheFootprintEntry, apacheFootprintVolCtx=apacheFootprintVolCtx, apacheFootprintPort=apacheFootprintPort, apacheFootprintSysCalls=apacheFootprintSysCalls, apacheFootprintInvolCtx=apacheFootprintInvolCtx, apacheFootprintTotalRSS=apacheFootprintTotalRSS, apacheServerPerfCurrentFinishingProcs=apacheServerPerfCurrentFinishingProcs, apacheConfigVersion=apacheConfigVersion, apacheConfigRequestsMaxPerConn=apacheConfigRequestsMaxPerConn, apacheConfigAccessLogFile=apacheConfigAccessLogFile, apacheConfigConfigFile=apacheConfigConfigFile, apacheConfigDocumentRoot=apacheConfigDocumentRoot, apacheServerPerfCurrentDNSProcs=apacheServerPerfCurrentDNSProcs, apacheFootprintNumSwaps=apacheFootprintNumSwaps, apacheFootprintPercentCPU=apacheFootprintPercentCPU, apacheFootprintTotalLogSize=apacheFootprintTotalLogSize, apacheServerPerfPort=apacheServerPerfPort, apacheFootprintInBlks=apacheFootprintInBlks, apacheConfigScriptLogFile=apacheConfigScriptLogFile, apachePerformance=apachePerformance, apacheConfigThreadsPerChild=apacheConfigThreadsPerChild, apacheFootprintTotalDiskSize=apacheFootprintTotalDiskSize, apacheSrv=apacheSrv, apacheConfigRunMode=apacheConfigRunMode, apacheServerPerfCurrentReplyProcs=apacheServerPerfCurrentReplyProcs, apacheServerPerfEntry=apacheServerPerfEntry, apacheServerPerfCurrentBusyProcs=apacheServerPerfCurrentBusyProcs, apacheConfigUser=apacheConfigUser, apacheFootprintTotalMEMSize=apacheFootprintTotalMEMSize, apacheServerPerfTotalAccesses=apacheServerPerfTotalAccesses, apacheServerPerfTotalTraffic=apacheServerPerfTotalTraffic, apacheConfigRequestsMaxPerChild=apacheConfigRequestsMaxPerChild, empire=empire, apacheFootprintCPUTime=apacheFootprintCPUTime, apacheModVersion=apacheModVersion, apacheConfigEntry=apacheConfigEntry, apacheConfigStartProcs=apacheConfigStartProcs, apacheConfigHostname=apacheConfigHostname, apacheConfigErrorLogFile=apacheConfigErrorLogFile, apacheFootprintNumThreads=apacheFootprintNumThreads, apacheServerPerfCurrentReadProcs=apacheServerPerfCurrentReadProcs, apacheServerPerfCurrentIdleProcs=apacheServerPerfCurrentIdleProcs, apacheFootprintTable=apacheFootprintTable, apacheConfigConnectionTimeout=apacheConfigConnectionTimeout, apacheFootprintMsgsSent=apacheFootprintMsgsSent, apacheConfigTable=apacheConfigTable, apacheModMode=apacheModMode, apacheConfigGroup=apacheConfigGroup, applications=applications, apacheServerPerfCurrentLoggingProcs=apacheServerPerfCurrentLoggingProcs, apacheConfigMaxIdleProcs=apacheConfigMaxIdleProcs, apacheConfigMaxProcs=apacheConfigMaxProcs, apacheFootprintPercentMEM=apacheFootprintPercentMEM, apacheServerPerfTable=apacheServerPerfTable, apacheConfigPID=apacheConfigPID, apacheConfigServerRoot=apacheConfigServerRoot, apacheServerPerfCurrentKeepAliveProcs=apacheServerPerfCurrentKeepAliveProcs, apacheFootprintMinorPgFlts=apacheFootprintMinorPgFlts, apacheFootprintDocSize=apacheFootprintDocSize, apacheServerPerfCurrentStartupProcs=apacheServerPerfCurrentStartupProcs, apacheConfigMinIdleProcs=apacheConfigMinIdleProcs, apacheFootprintMsgsRecv=apacheFootprintMsgsRecv, apacheServerPerfCurrentUsers=apacheServerPerfCurrentUsers, apacheServerPerfCurrentTotalProcs=apacheServerPerfCurrentTotalProcs, apacheConfigKeepAliveTimeout=apacheConfigKeepAliveTimeout, apacheConfigScoreboardFile=apacheConfigScoreboardFile, apacheFootprintMajorPgFlts=apacheFootprintMajorPgFlts, apacheFootprintOutBlks=apacheFootprintOutBlks, apacheConfigPIDFile=apacheConfigPIDFile)
for number in range(4): print(f"The for loop has run for {number} time(s)") print("\n") for number in range(10, 15): print(f"The for loop has run for {number} time(s)") print("\n") for number in range(20, 30, 2): print(f"The for loop has run for {number} time(s)") print("\n") status = False for number in range(1, 5): print(f"Attempt {number}") if status: print("Success!") break else: print("Boom!") else: print("No more chance")
for number in range(4): print(f'The for loop has run for {number} time(s)') print('\n') for number in range(10, 15): print(f'The for loop has run for {number} time(s)') print('\n') for number in range(20, 30, 2): print(f'The for loop has run for {number} time(s)') print('\n') status = False for number in range(1, 5): print(f'Attempt {number}') if status: print('Success!') break else: print('Boom!') else: print('No more chance')
""" A sliding window is an abstract concept commonly used in array/string problems. A window is a range of elements in the array/string which usually defined by the start and end indices, i.e. [i, j)[i,j) (left-closed, right-open). A sliding window is a window "slides" its two boundaries to the certain direction. """ class Solution: def lengthOfLongestSubstring(self, s: str) -> int: n = len(s) hs = set() ans, i, j = 0, 0, 0 while i < n and j < n: if s[j] not in hs: hs.add(s[j]) j += 1 ans = max(ans, j-i) else: hs.remove(s[i]) i += 1 return ans class Solution: def lengthOfLongestSubstring(self, s: str) -> int: n = len(s) hd = {} ans,j = 0,0 for i in range(len(s)): if s[i] in hd: j = max(hd[s[i]], j) ans = max(ans, i-j+1) hd[s[i]] = i+1 return ans
""" A sliding window is an abstract concept commonly used in array/string problems. A window is a range of elements in the array/string which usually defined by the start and end indices, i.e. [i, j)[i,j) (left-closed, right-open). A sliding window is a window "slides" its two boundaries to the certain direction. """ class Solution: def length_of_longest_substring(self, s: str) -> int: n = len(s) hs = set() (ans, i, j) = (0, 0, 0) while i < n and j < n: if s[j] not in hs: hs.add(s[j]) j += 1 ans = max(ans, j - i) else: hs.remove(s[i]) i += 1 return ans class Solution: def length_of_longest_substring(self, s: str) -> int: n = len(s) hd = {} (ans, j) = (0, 0) for i in range(len(s)): if s[i] in hd: j = max(hd[s[i]], j) ans = max(ans, i - j + 1) hd[s[i]] = i + 1 return ans
def up(config, database): database.execute('ALTER TABLE ONLY mapped_courses ALTER COLUMN registration_section SET DATA TYPE character varying(255) USING registration_section::varchar(255)') database.execute('ALTER TABLE ONLY mapped_courses ALTER COLUMN mapped_section SET DATA TYPE character varying(255) USING mapped_section::varchar(255)') database.execute('ALTER TABLE ONLY courses_users ALTER COLUMN registration_section SET DATA TYPE character varying(255) USING registration_section::varchar(255)') def down(config, database): database.execute('ALTER TABLE ONLY mapped_courses ALTER COLUMN registration_section SET DATA TYPE integer USING registration_section::integer') database.execute('ALTER TABLE ONLY mapped_courses ALTER COLUMN mapped_section SET DATA TYPE integer USING mapped_section::integer') database.execute('ALTER TABLE ONLY courses_users ALTER COLUMN registration_section SET DATA TYPE integer USING registration_section::integer')
def up(config, database): database.execute('ALTER TABLE ONLY mapped_courses ALTER COLUMN registration_section SET DATA TYPE character varying(255) USING registration_section::varchar(255)') database.execute('ALTER TABLE ONLY mapped_courses ALTER COLUMN mapped_section SET DATA TYPE character varying(255) USING mapped_section::varchar(255)') database.execute('ALTER TABLE ONLY courses_users ALTER COLUMN registration_section SET DATA TYPE character varying(255) USING registration_section::varchar(255)') def down(config, database): database.execute('ALTER TABLE ONLY mapped_courses ALTER COLUMN registration_section SET DATA TYPE integer USING registration_section::integer') database.execute('ALTER TABLE ONLY mapped_courses ALTER COLUMN mapped_section SET DATA TYPE integer USING mapped_section::integer') database.execute('ALTER TABLE ONLY courses_users ALTER COLUMN registration_section SET DATA TYPE integer USING registration_section::integer')
# Write a function that always returns 5 # Sounds easy right? # Just bear in mind that you can't use # any of the following characters: 0123456789*+-/ def unusual_five(): return len('trump')
def unusual_five(): return len('trump')
# code -> {"name": "creaturename", # "desc": "description",} CREATURES = {} SOLO_CREATURES = {} GROUP_CREATURES = {} with open("creatures.csv") as f: lines = f.readlines() for line in lines: if line.strip() == "": continue parts = line.strip().split(",") code = int(''.join(parts[1:5])) if parts[6].strip() == "group": GROUP_CREATURES[code] = {"name": parts[0], "desc": parts[5]} else: SOLO_CREATURES[code] = {"name": parts[0], "desc": parts[5]} CREATURES[code] = {"name": parts[0], "desc": parts[5]}
creatures = {} solo_creatures = {} group_creatures = {} with open('creatures.csv') as f: lines = f.readlines() for line in lines: if line.strip() == '': continue parts = line.strip().split(',') code = int(''.join(parts[1:5])) if parts[6].strip() == 'group': GROUP_CREATURES[code] = {'name': parts[0], 'desc': parts[5]} else: SOLO_CREATURES[code] = {'name': parts[0], 'desc': parts[5]} CREATURES[code] = {'name': parts[0], 'desc': parts[5]}
# Copyright 2020 VMware, Inc. # SPDX-License-Identifier: Apache-2.0 PATH = "path" DESCRIPTOR = "descriptor" BEFORE = "@before" AFTER = "@after" DESCRIPTORS = "@descriptors" PARAMS = "@params" POINTER_SEPARATOR = "/" WS_CURRENT_POINTER = "__crp" WS_ENV = "__env" WS_OUTPUT = "_output"
path = 'path' descriptor = 'descriptor' before = '@before' after = '@after' descriptors = '@descriptors' params = '@params' pointer_separator = '/' ws_current_pointer = '__crp' ws_env = '__env' ws_output = '_output'
kazu = [3, 7, 0, 1, 2, 2] shin_kazu = 1 for ex in range(1, len(kazu), 2): shin_kazu *= kazu[ex] print(shin_kazu)
kazu = [3, 7, 0, 1, 2, 2] shin_kazu = 1 for ex in range(1, len(kazu), 2): shin_kazu *= kazu[ex] print(shin_kazu)
def group(list): groups = [] curr = [] for elem in list: if elem in curr: curr.append(elem) elif len(curr) != 0: groups.append(curr) curr = [] curr.append(elem) elif len(curr) == 0: curr.append(elem) if curr != []: groups.append(curr) return groups def main(): print(group([1, 1, 1, 2, 3, 1, 1])) # Expected output : [[1, 1, 1], [2], [3], [1, 1]] print(group([1, 2, 1, 2, 3, 3])) # Expected output : [[1], [2], [1], [2], [3, 3]] if __name__ == '__main__': main()
def group(list): groups = [] curr = [] for elem in list: if elem in curr: curr.append(elem) elif len(curr) != 0: groups.append(curr) curr = [] curr.append(elem) elif len(curr) == 0: curr.append(elem) if curr != []: groups.append(curr) return groups def main(): print(group([1, 1, 1, 2, 3, 1, 1])) print(group([1, 2, 1, 2, 3, 3])) if __name__ == '__main__': main()
# -*- coding: utf-8 -*- """ Created on Thu Sep 23 14:04:41 2021 @author: Experiment4K """ class Euro2404(object): def __init__(self,inst): self.inst = inst self.inst.serial.timeout = 2 self.inst.serial.baudrate = 9600 def data_info(self,devicenumber):#returns the data keys of the device self.devicenumber = devicenumber self.data_keys = [str(devicenumber) + '_Euro2404_T'] return(self.data_keys) def read_info(self,devicenumber): self.read_keys = [str(devicenumber) + '_Euro2404_T'] return(self.read_keys) def write_info(self,devicenumber): self.write_keys = [] return(self.write_keys) def write_pattern(self,devicenumber,write_key): return([]) def floatHandling(self,text): try: f = str(float(text)) except: try: f = str(float(text.replace(',','.'))) except: f = False return(f) def Write(self,Key,L): None def Read(self,Key): T = self.inst.read_register(1) return([(Key,T),])
""" Created on Thu Sep 23 14:04:41 2021 @author: Experiment4K """ class Euro2404(object): def __init__(self, inst): self.inst = inst self.inst.serial.timeout = 2 self.inst.serial.baudrate = 9600 def data_info(self, devicenumber): self.devicenumber = devicenumber self.data_keys = [str(devicenumber) + '_Euro2404_T'] return self.data_keys def read_info(self, devicenumber): self.read_keys = [str(devicenumber) + '_Euro2404_T'] return self.read_keys def write_info(self, devicenumber): self.write_keys = [] return self.write_keys def write_pattern(self, devicenumber, write_key): return [] def float_handling(self, text): try: f = str(float(text)) except: try: f = str(float(text.replace(',', '.'))) except: f = False return f def write(self, Key, L): None def read(self, Key): t = self.inst.read_register(1) return [(Key, T)]
# python3 class Query: def __init__(self, query): self.type = query[0] if self.type == 'check': self.ind = int(query[1]) else: self.s = query[1] class QueryProcessor: _multiplier = 263 _prime = 1000000007 def __init__(self, bucket_count): self.bucket_count = bucket_count self.elems = [None] * bucket_count def _hash_func(self, s): ans = 0 for c in reversed(s): ans = (ans * self._multiplier + ord(c)) % self._prime return ans % self.bucket_count def write_search_result(self, was_found): print('yes' if was_found else 'no') def write_chain(self, chain): if chain is not None: print(' '.join(chain)) else: print(' ') def read_query(self): return Query(input().split()) def process_query(self, query): if query.type == "check": self.write_chain(self.elems[query.ind]) else: hashKey = self._hash_func(query.s) if query.type == 'add': if self.elems[hashKey] == None: self.elems[hashKey] = [query.s] else: if query.s not in self.elems[hashKey]: self.elems[hashKey].insert(0, query.s) elif query.type == 'find': if self.elems[hashKey] is not None and query.s in self.elems[hashKey]: self.write_search_result(True) else: self.write_search_result(False) elif query.type == 'del': if self.elems[hashKey] is not None and query.s in self.elems[hashKey]: self.elems[hashKey].remove(query.s) if len(self.elems[hashKey]) == 0: self.elems[hashKey] = None def process_queries(self): n = int(input()) for i in range(n): self.process_query(self.read_query()) if __name__ == '__main__': bucket_count = int(input()) proc = QueryProcessor(bucket_count) proc.process_queries()
class Query: def __init__(self, query): self.type = query[0] if self.type == 'check': self.ind = int(query[1]) else: self.s = query[1] class Queryprocessor: _multiplier = 263 _prime = 1000000007 def __init__(self, bucket_count): self.bucket_count = bucket_count self.elems = [None] * bucket_count def _hash_func(self, s): ans = 0 for c in reversed(s): ans = (ans * self._multiplier + ord(c)) % self._prime return ans % self.bucket_count def write_search_result(self, was_found): print('yes' if was_found else 'no') def write_chain(self, chain): if chain is not None: print(' '.join(chain)) else: print(' ') def read_query(self): return query(input().split()) def process_query(self, query): if query.type == 'check': self.write_chain(self.elems[query.ind]) else: hash_key = self._hash_func(query.s) if query.type == 'add': if self.elems[hashKey] == None: self.elems[hashKey] = [query.s] elif query.s not in self.elems[hashKey]: self.elems[hashKey].insert(0, query.s) elif query.type == 'find': if self.elems[hashKey] is not None and query.s in self.elems[hashKey]: self.write_search_result(True) else: self.write_search_result(False) elif query.type == 'del': if self.elems[hashKey] is not None and query.s in self.elems[hashKey]: self.elems[hashKey].remove(query.s) if len(self.elems[hashKey]) == 0: self.elems[hashKey] = None def process_queries(self): n = int(input()) for i in range(n): self.process_query(self.read_query()) if __name__ == '__main__': bucket_count = int(input()) proc = query_processor(bucket_count) proc.process_queries()
class Solution: def solve(self, s): s = s.replace('(',' ( ').replace(')',' ) ') tokens = s.split() stack = [] for token in tokens: if token == 'true': if stack and stack[-1] in ['or','and']: if stack[-1] == 'or': stack.pop() stack.append(stack.pop() or True) else: stack.pop() stack.append(stack.pop() and True) else: stack.append(True) elif token == 'false': if stack and stack[-1] in ['or','and']: if stack[-1] == 'or': stack.pop() stack.append(stack.pop() or False) else: stack.pop() stack.append(stack.pop() and False) else: stack.append(False) elif token == '(': stack.append('(') elif token == ')': val = stack.pop() stack.pop() if stack and stack[-1] in ['or','and']: if stack[-1] == 'or': stack.pop() stack.append(stack.pop() or val) else: stack.pop() stack.append(stack.pop() and val) else: stack.append(val) elif token == 'or': stack.append('or') elif token == 'and': stack.append('and') return stack[0]
class Solution: def solve(self, s): s = s.replace('(', ' ( ').replace(')', ' ) ') tokens = s.split() stack = [] for token in tokens: if token == 'true': if stack and stack[-1] in ['or', 'and']: if stack[-1] == 'or': stack.pop() stack.append(stack.pop() or True) else: stack.pop() stack.append(stack.pop() and True) else: stack.append(True) elif token == 'false': if stack and stack[-1] in ['or', 'and']: if stack[-1] == 'or': stack.pop() stack.append(stack.pop() or False) else: stack.pop() stack.append(stack.pop() and False) else: stack.append(False) elif token == '(': stack.append('(') elif token == ')': val = stack.pop() stack.pop() if stack and stack[-1] in ['or', 'and']: if stack[-1] == 'or': stack.pop() stack.append(stack.pop() or val) else: stack.pop() stack.append(stack.pop() and val) else: stack.append(val) elif token == 'or': stack.append('or') elif token == 'and': stack.append('and') return stack[0]
""" Algorithmic intervensions for satisfying fairness criteria. There are three families of techniques: 1. **Pre-processing** - Adjust features in the dataset. 2. **In-processing** - Adjust the learning algorithm. 3. **Post-processing** - Adjust the learned classifier. """
""" Algorithmic intervensions for satisfying fairness criteria. There are three families of techniques: 1. **Pre-processing** - Adjust features in the dataset. 2. **In-processing** - Adjust the learning algorithm. 3. **Post-processing** - Adjust the learned classifier. """
checkpoint_config = dict(interval=20) # yapf:disable log_config = dict( interval=5, hooks=[ dict(type='TextLoggerHook'), dict(type='TensorboardLoggerHook') ]) # yapf:enable custom_hooks = [dict(type='NumClassCheckHook')] dist_params = dict(backend='nccl') log_level = 'INFO' load_from = None resume_from = None workflow = [('train', 1)] class_name = ['0'] # model settings model = dict( type='YOLOV4', backbone=dict( type='YOLOV4Backbone' ), neck=dict( type='YOLOV4Neck', in_channels=[1024, 512, 256], out_channels=[512, 256, 128]), bbox_head=dict( type='YOLOV4Head', num_classes=1, in_channels=[512, 256, 128], out_channels=[1024, 512, 256], anchor_generator=dict( type='YOLOAnchorGenerator', # base_sizes=[[(116, 90), (156, 198), (373, 326)], # [(30, 61), (62, 45), (59, 119)], # [(10, 13), (16, 30), (33, 23)]], # base_sizes=[ # [[30, 28], [33, 17], [21, 25]], # [[24, 17], [18, 17], [14, 21]], # [[20, 12], [12, 15], [11, 11]]], base_sizes=[ [[2*30, 2*28], [2*33, 2*17], [2*21, 2*25]], [[2*24, 2*17], [2*18, 2*17], [2*14, 2*21]], [[2*20, 2*12], [2*12, 2*15], [2*11, 2*11]]], strides=[32, 16, 8]), bbox_coder=dict(type='YOLOBBoxCoder'), featmap_strides=[32, 16, 8], loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_conf=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_xy=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=2.0, reduction='sum'), loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), # training and testing settings train_cfg=dict( assigner=dict( type='GridAssigner', pos_iou_thr=0.3, neg_iou_thr=0.3, min_pos_iou=0)), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, conf_thr=0.005, nms=dict(type='nms', iou_threshold=0.4), max_per_img=100)) # dataset settings dataset_type = 'MyCocoDataset' # data_root = '/Users/kyanchen/Code/mmdetection/data/multi_label' # data_root = r'M:\Tiny_Ship\20211214_All_P_Slice_Data' data_root = '/data/kyanchen/det/data/Tiny_P' img_norm_cfg = dict(mean=[52.27434974492982, 69.82640643452488, 79.01744958336889], std=[2.7533898592345842, 2.634773617140497, 2.172352333590293], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 1.2)), dict( type='MinIoURandomCrop', min_ious=(0.01, 0.05, 0.1), min_crop_size=0.7), # dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True), dict(type='Resize', img_scale=[(256, 256)], keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5, direction=['horizontal', 'vertical']), dict(type='PhotoMetricDistortion', brightness_delta=20, contrast_range=(0.7, 1.3), saturation_range=(0.7, 1.3), hue_delta=15 ), dict(type='Normalize', **img_norm_cfg), # dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'], meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'img_norm_cfg') ) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(256, 256), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), # dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), # dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ] data = dict( samples_per_gpu=100, workers_per_gpu=8, train=dict( type=dataset_type, ann_file='../data/tiny_ship/tiny_train.json', img_prefix=data_root+'/train', classes=class_name, pipeline=train_pipeline), val=dict( type=dataset_type, ann_file='../data/tiny_ship/tiny_val.json', img_prefix=data_root+'/val', classes=class_name, pipeline=test_pipeline), test=dict( type=dataset_type, ann_file='../data/tiny_ship/tiny_test.json', classes=class_name, img_prefix=data_root+'/test', pipeline=test_pipeline)) # optimizer # AdamW # optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) optimizer = dict(type='AdamW', lr=0.01, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2) # optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) optimizer_config = dict(grad_clip=None) # learning policy # lr_config = dict( # policy='PolyLrUpdaterHook', # warmup='linear', # warmup_iters=2000, # same as burn-in in darknet # warmup_ratio=0.1, # step=[218, 246]) lr_config = dict( policy='Poly', power=0.9, min_lr=0.00001, by_epoch=True, warmup='linear', warmup_iters=15, warmup_ratio=0.1, warmup_by_epoch=True) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=300) evaluation = dict(interval=1, metric=['bbox'], mode='eval', areaRng=[0, 20, 200]) test = dict(interval=2, metric=['bbox'], mode='test', areaRng=[0, 20, 200])
checkpoint_config = dict(interval=20) log_config = dict(interval=5, hooks=[dict(type='TextLoggerHook'), dict(type='TensorboardLoggerHook')]) custom_hooks = [dict(type='NumClassCheckHook')] dist_params = dict(backend='nccl') log_level = 'INFO' load_from = None resume_from = None workflow = [('train', 1)] class_name = ['0'] model = dict(type='YOLOV4', backbone=dict(type='YOLOV4Backbone'), neck=dict(type='YOLOV4Neck', in_channels=[1024, 512, 256], out_channels=[512, 256, 128]), bbox_head=dict(type='YOLOV4Head', num_classes=1, in_channels=[512, 256, 128], out_channels=[1024, 512, 256], anchor_generator=dict(type='YOLOAnchorGenerator', base_sizes=[[[2 * 30, 2 * 28], [2 * 33, 2 * 17], [2 * 21, 2 * 25]], [[2 * 24, 2 * 17], [2 * 18, 2 * 17], [2 * 14, 2 * 21]], [[2 * 20, 2 * 12], [2 * 12, 2 * 15], [2 * 11, 2 * 11]]], strides=[32, 16, 8]), bbox_coder=dict(type='YOLOBBoxCoder'), featmap_strides=[32, 16, 8], loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_conf=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_xy=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=2.0, reduction='sum'), loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), train_cfg=dict(assigner=dict(type='GridAssigner', pos_iou_thr=0.3, neg_iou_thr=0.3, min_pos_iou=0)), test_cfg=dict(nms_pre=1000, min_bbox_size=0, score_thr=0.05, conf_thr=0.005, nms=dict(type='nms', iou_threshold=0.4), max_per_img=100)) dataset_type = 'MyCocoDataset' data_root = '/data/kyanchen/det/data/Tiny_P' img_norm_cfg = dict(mean=[52.27434974492982, 69.82640643452488, 79.01744958336889], std=[2.7533898592345842, 2.634773617140497, 2.172352333590293], to_rgb=True) train_pipeline = [dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), dict(type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 1.2)), dict(type='MinIoURandomCrop', min_ious=(0.01, 0.05, 0.1), min_crop_size=0.7), dict(type='Resize', img_scale=[(256, 256)], keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5, direction=['horizontal', 'vertical']), dict(type='PhotoMetricDistortion', brightness_delta=20, contrast_range=(0.7, 1.3), saturation_range=(0.7, 1.3), hue_delta=15), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'], meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'img_norm_cfg'))] test_pipeline = [dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(256, 256), flip=False, transforms=[dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img'])])] data = dict(samples_per_gpu=100, workers_per_gpu=8, train=dict(type=dataset_type, ann_file='../data/tiny_ship/tiny_train.json', img_prefix=data_root + '/train', classes=class_name, pipeline=train_pipeline), val=dict(type=dataset_type, ann_file='../data/tiny_ship/tiny_val.json', img_prefix=data_root + '/val', classes=class_name, pipeline=test_pipeline), test=dict(type=dataset_type, ann_file='../data/tiny_ship/tiny_test.json', classes=class_name, img_prefix=data_root + '/test', pipeline=test_pipeline)) optimizer = dict(type='AdamW', lr=0.01, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01) optimizer_config = dict(grad_clip=None) lr_config = dict(policy='Poly', power=0.9, min_lr=1e-05, by_epoch=True, warmup='linear', warmup_iters=15, warmup_ratio=0.1, warmup_by_epoch=True) runner = dict(type='EpochBasedRunner', max_epochs=300) evaluation = dict(interval=1, metric=['bbox'], mode='eval', areaRng=[0, 20, 200]) test = dict(interval=2, metric=['bbox'], mode='test', areaRng=[0, 20, 200])
#!/usr/bin/python3 # suggested time: 10 mins # INSTRUCTIONS: # # Give this class a constructor that prints "hi". # # Give this class a destructor that prints "bye". class Spinner: """ Defining a constructor""" def __init__(self): print("Hi") def __del__(self): print("Bye") Spinner()
class Spinner: """ Defining a constructor""" def __init__(self): print('Hi') def __del__(self): print('Bye') spinner()
######################## #### Initialisation #### ######################## input_ex1 = [] with open("inputs/day6_1.txt") as inputfile: input_ex1 = [int(i) for i in inputfile.readline().strip().split()] ######################## #### Part one #### ######################## def redistribute(memblock): memsize = len(memblock) tracker = memblock.index(max(memblock)) stack = memblock[tracker] memblock[tracker] = 0 tracker += 1 while stack > 0: memblock[tracker % memsize] += 1 stack -= 1 tracker += 1 return memblock # input_ex1 = [0, 2, 7, 0] found_distributions = [] found_distributions.append(input_ex1[::]) new_input = redistribute(input_ex1[::]) steps = 1 while not new_input in found_distributions: found_distributions.append(new_input) new_input = redistribute(new_input[::]) steps += 1 print("It takes {} cycles to enter an infinite loop.".format(steps)) ######################## #### Part two #### ######################## length_of_loop = len(found_distributions) - \ found_distributions.index(new_input) print("The infinite loop contains {} steps".format(length_of_loop))
input_ex1 = [] with open('inputs/day6_1.txt') as inputfile: input_ex1 = [int(i) for i in inputfile.readline().strip().split()] def redistribute(memblock): memsize = len(memblock) tracker = memblock.index(max(memblock)) stack = memblock[tracker] memblock[tracker] = 0 tracker += 1 while stack > 0: memblock[tracker % memsize] += 1 stack -= 1 tracker += 1 return memblock found_distributions = [] found_distributions.append(input_ex1[:]) new_input = redistribute(input_ex1[:]) steps = 1 while not new_input in found_distributions: found_distributions.append(new_input) new_input = redistribute(new_input[:]) steps += 1 print('It takes {} cycles to enter an infinite loop.'.format(steps)) length_of_loop = len(found_distributions) - found_distributions.index(new_input) print('The infinite loop contains {} steps'.format(length_of_loop))
''' Given a set of candidate numbers (candidates) (without duplicates) and a target number (target), find all unique combinations in candidates where the candidate numbers sums to target. The same repeated number may be chosen from candidates unlimited number of times. Note: All numbers (including target) will be positive integers. The solution set must not contain duplicate combinations. ''' class Solution(object): def combinationSum(self, candidates, target): """ :type candidates: List[int] :type target: int :rtype: List[List[int]] """ result = [] def recursive(candidates, target, currList, index): if target < 0: return if target == 0: result.append(currList) return for start in range(index, len(candidates)): recursive(candidates, target - candidates[start], currList + [candidates[start]], start) recursive(candidates, target, [], 0) return result
""" Given a set of candidate numbers (candidates) (without duplicates) and a target number (target), find all unique combinations in candidates where the candidate numbers sums to target. The same repeated number may be chosen from candidates unlimited number of times. Note: All numbers (including target) will be positive integers. The solution set must not contain duplicate combinations. """ class Solution(object): def combination_sum(self, candidates, target): """ :type candidates: List[int] :type target: int :rtype: List[List[int]] """ result = [] def recursive(candidates, target, currList, index): if target < 0: return if target == 0: result.append(currList) return for start in range(index, len(candidates)): recursive(candidates, target - candidates[start], currList + [candidates[start]], start) recursive(candidates, target, [], 0) return result
class Solution: def thousandSeparator(self, n: int) -> str: s = str(n)[::-1] return ".".join(s[i : i + 3] for i in range(0, len(s), 3))[::-1]
class Solution: def thousand_separator(self, n: int) -> str: s = str(n)[::-1] return '.'.join((s[i:i + 3] for i in range(0, len(s), 3)))[::-1]
class Solution: def titleToNumber(self, s: str) -> int: length = len(s) num = 0 for i in range(0, length): num += (ord(s[length-i-1]) - ord('A') + 1) * (26 ** i) return num
class Solution: def title_to_number(self, s: str) -> int: length = len(s) num = 0 for i in range(0, length): num += (ord(s[length - i - 1]) - ord('A') + 1) * 26 ** i return num
""" Problem 3. Suppose we rewrite the FancyDivide function to use a helper function. def FancyDivide(list_of_numbers, index): denom = list_of_numbers[index] return [SimpleDivide(item, denom) for item in list_of_numbers] def SimpleDivide(item, denom): return item / denom This code raises a ZeroDivisionError exception for the following call: FancyDivide([0, 2, 4], 0) Your task is to change the definition of SimpleDivide so that the call does not raise an exception. When dividing by 0, FancyDivide should return a list with all 0 elements. Any other error cases should still raise exceptions. You should only handle the ZeroDivisionError. """ #define the SimpleDivide function here def SimpleDivide(item, denom): try: return item / denom except ZeroDivisionError: return 0
""" Problem 3. Suppose we rewrite the FancyDivide function to use a helper function. def FancyDivide(list_of_numbers, index): denom = list_of_numbers[index] return [SimpleDivide(item, denom) for item in list_of_numbers] def SimpleDivide(item, denom): return item / denom This code raises a ZeroDivisionError exception for the following call: FancyDivide([0, 2, 4], 0) Your task is to change the definition of SimpleDivide so that the call does not raise an exception. When dividing by 0, FancyDivide should return a list with all 0 elements. Any other error cases should still raise exceptions. You should only handle the ZeroDivisionError. """ def simple_divide(item, denom): try: return item / denom except ZeroDivisionError: return 0
""" private public protected """ ################################# # class A: # def __init__(self,yetki): # self.yetki = yetki # self.__gizli = 2 # # __gizli gizli # # __gizli_ gizli # # __gizli__ gizli degil # # _yari_gizli # obj1 = A(1) # print(obj1.yetki) # print(obj1.__gizli) ################################# # class A: # def __init__(self,yetki): # self.yetki = yetki # self.__gizli = 2 # def gizliGetir(self): # getter # if self.yetki == 1: # return self.__gizli # else: # raise Exception("Yetki Hatasi") # obj1 = A(1) # print(obj1.gizliGetir()) # print(obj2.gizliGetir()) # class A: def __init__(self,yetki): self.yetki = yetki self.__gizli = 2 @property def gizli(self): # getter if self.yetki == 1: return self.__gizli else: raise Exception("Yetki Hatasi") obj1 = A(1) print(obj1.gizli)
""" private public protected """ class A: def __init__(self, yetki): self.yetki = yetki self.__gizli = 2 @property def gizli(self): if self.yetki == 1: return self.__gizli else: raise exception('Yetki Hatasi') obj1 = a(1) print(obj1.gizli)
def get(event, context): return {"body": "GET OK", "statusCode": 200} def put(event, context): return {"body": "PUT OK", "statusCode": 200}
def get(event, context): return {'body': 'GET OK', 'statusCode': 200} def put(event, context): return {'body': 'PUT OK', 'statusCode': 200}
a=input('Enter a srting: ') i=0 for letters in a: if letters=='a' or letters=='e' or letters=='i' or letters=='o' or letters=='u': i+=1 print("Number of vowels:",i)
a = input('Enter a srting: ') i = 0 for letters in a: if letters == 'a' or letters == 'e' or letters == 'i' or (letters == 'o') or (letters == 'u'): i += 1 print('Number of vowels:', i)
class Solution: def lemonadeChange(self, bills: List[int]) -> bool: m5 = 0 m10 = 0 m20 = 0 for b in bills: if b == 5: m5 += 1 elif b == 10: m10 += 1 if m5 >= 1: m5 -= 1 else: return False else: m20 += 1 if m10 >= 1 and m5 >= 1: m10 -= 1 m5 -= 1 elif m5 >= 3: m5 -= 3 else: return False return True
class Solution: def lemonade_change(self, bills: List[int]) -> bool: m5 = 0 m10 = 0 m20 = 0 for b in bills: if b == 5: m5 += 1 elif b == 10: m10 += 1 if m5 >= 1: m5 -= 1 else: return False else: m20 += 1 if m10 >= 1 and m5 >= 1: m10 -= 1 m5 -= 1 elif m5 >= 3: m5 -= 3 else: return False return True
t = input() lines = int(input()) columns = int(input()) result = 0 if t == 'Premiere': result = columns * lines * 12 elif t == 'Normal': result = columns * lines * 7.5 elif t == 'Discount': result = columns * lines * 5 print('{0:.2f} leva'.format(result))
t = input() lines = int(input()) columns = int(input()) result = 0 if t == 'Premiere': result = columns * lines * 12 elif t == 'Normal': result = columns * lines * 7.5 elif t == 'Discount': result = columns * lines * 5 print('{0:.2f} leva'.format(result))
def main_pot_winner(hand_txt: str) -> str: """ Extracts the name of the main pot winner Parameters: hand_txt (int): a hand history Returns: winner (str): name of main pot winner or '**[CHOP-CHOP]**' when it's a chop """ # side pots if 'side pot-1' in str(hand_txt): for line in hand_txt.split('\n'): if 'main pot' in line: winner = line.split(' collected')[0] return winner # chop pot if str(hand_txt).count('from main pot') >= 2: winner = '**[CHOP-CHOP]**' return winner # normal main pot for line in hand_txt.split('\n'): if 'and won' in line: if 'did not' in line: winner = line.split(' did not ')[0].split(': ')[1] return winner elif 'showed' in line: winner = line.split(' showed ')[0].split(': ')[1].split(' (')[0] return winner
def main_pot_winner(hand_txt: str) -> str: """ Extracts the name of the main pot winner Parameters: hand_txt (int): a hand history Returns: winner (str): name of main pot winner or '**[CHOP-CHOP]**' when it's a chop """ if 'side pot-1' in str(hand_txt): for line in hand_txt.split('\n'): if 'main pot' in line: winner = line.split(' collected')[0] return winner if str(hand_txt).count('from main pot') >= 2: winner = '**[CHOP-CHOP]**' return winner for line in hand_txt.split('\n'): if 'and won' in line: if 'did not' in line: winner = line.split(' did not ')[0].split(': ')[1] return winner elif 'showed' in line: winner = line.split(' showed ')[0].split(': ')[1].split(' (')[0] return winner
""" Number to string map 0 => None 1 => None 2 => A,B,C 3 => D,E,F 4 => G,H,I 5 => J,K,L 6 => M,N,O 7 => P,Q,R,S 8 => T,U,V 9 => W,X,Y,Z Map the number to corresponding character. Example: 23 => AD,AE,AF,BD,BE,BF,CD,CE,CF """ num_letter_map = { "0":None, "1":None, "2":"ABC", "3":"DEF", "4":"GHI", "5":"JKL", "6":"MNO", "7":"PQRS", "8":"TUV", "9":"WXYZ" } def number_to_sting(number_str): left = 0 # Keep tracks the position of the number result = [] # store the combination find_combination(number_str, left, result) def find_combination(number_str, left, result ): if left == len(number_str): # If reached right_end print the combination print("".join(map(str,result))) return # Get the corresponding letters for the number letters = num_letter_map[number_str[left]] # check if it's empty then add empty in result and call the function if letters is None: result.append("") find_combination(number_str,left+1,result) result.pop() else: for l in letters: result.append(l) find_combination(number_str,left + 1, result) result.pop() test_input = ["23","12","31","208","345"] for t_case in test_input: number_to_sting(t_case)
""" Number to string map 0 => None 1 => None 2 => A,B,C 3 => D,E,F 4 => G,H,I 5 => J,K,L 6 => M,N,O 7 => P,Q,R,S 8 => T,U,V 9 => W,X,Y,Z Map the number to corresponding character. Example: 23 => AD,AE,AF,BD,BE,BF,CD,CE,CF """ num_letter_map = {'0': None, '1': None, '2': 'ABC', '3': 'DEF', '4': 'GHI', '5': 'JKL', '6': 'MNO', '7': 'PQRS', '8': 'TUV', '9': 'WXYZ'} def number_to_sting(number_str): left = 0 result = [] find_combination(number_str, left, result) def find_combination(number_str, left, result): if left == len(number_str): print(''.join(map(str, result))) return letters = num_letter_map[number_str[left]] if letters is None: result.append('') find_combination(number_str, left + 1, result) result.pop() else: for l in letters: result.append(l) find_combination(number_str, left + 1, result) result.pop() test_input = ['23', '12', '31', '208', '345'] for t_case in test_input: number_to_sting(t_case)
''' https://www.codingame.com/training/easy/prefix-code ''' def find_unique_lengths(inputs: dict) -> set: """ Given a dictionary, return a set of integers representing the unique lengths of the input strings """ return set([len(str(s)) for s in inputs]) n = int(input()) tokens = {} for i in range(n): inputs = input().split() b = inputs[0] c = int(inputs[1]) tokens[b] = c lengths = find_unique_lengths(tokens) s = input() i = 0 out = [] while True: if i == len(s): break for length in lengths: token = s[i:i+length] if token in tokens: out.append(tokens[token]) i += len(token) break else: print(f"DECODE FAIL AT INDEX {i}") exit(0) print("".join([chr(n) for n in out]))
""" https://www.codingame.com/training/easy/prefix-code """ def find_unique_lengths(inputs: dict) -> set: """ Given a dictionary, return a set of integers representing the unique lengths of the input strings """ return set([len(str(s)) for s in inputs]) n = int(input()) tokens = {} for i in range(n): inputs = input().split() b = inputs[0] c = int(inputs[1]) tokens[b] = c lengths = find_unique_lengths(tokens) s = input() i = 0 out = [] while True: if i == len(s): break for length in lengths: token = s[i:i + length] if token in tokens: out.append(tokens[token]) i += len(token) break else: print(f'DECODE FAIL AT INDEX {i}') exit(0) print(''.join([chr(n) for n in out]))
# Ugg, dummy file to make Github not think this site is written in PHP # that's an insult foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar'
foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar' foo = 'bar'
""" @author: David Lei @since: 6/11/2017 https://www.hackerrank.com/challenges/find-the-merge-point-of-two-joined-linked-lists/problem Given two lists guaranteed to converge (have same value) find when this occurs. Return the data at the node where this occurs. Both pass :) """ def FindMergeNodeBetter(headA, headB): # Space = O(1) # Time = O(a + b) # Idea: Since it is guaranteed to converge for pair of lists A and B with merge points a in list A # and b in list B. In the worst case they are the ends of both lists and both lists are different lengths. # If you join A and B and B and A then iterate through them you can find the point where the first common value occur. # For example: # A = [a, b, c, d, e] # B = [k, i, e] # a = e, b = e # A' = A + B = [a, b, c, d, e, k, i e] # B' = B + A = [k, i, e, a, b, c, d, e] # A = [a, b, c, e, f, g] # B = [z, f, g] # a = f, b = f # A' = A + B = [a, b, c, e, f, g, z, f, g] # B' = B + A = [z, f, g, a, b, c, e, f, g] # The same number of steps is required weather you traverse A first or B to find the common node to merge from. # Instead of making new list using extra space, you can just start the traversal from the head of the other list # when the current list is out of nodes. a = headA b = headB while True: if a.data == b.data: # Guaranteed. return a.data # Handles starting traversal from other list. a = a.next if a.next else headB b = b.next if b.next else headA # Note: If convergence is not guaranteed then can stop if iterations exceed len(a) + len(b). def FindMergeNodeNaive(headA, headB): # Naive solution # Space = O(a + b) # Time = O(a + b + a) a_values = set() # O(len(a)) space. b_values = set() # O(len(b)) space. while headA: # O(len(a)). a_values.add(headA.data) # O(1) amortized, O(n) worst. headA = headA.next while headB: # O(len(b)). b_values.add(headB.data) # O(1) amortized, O(n) worst. headB = headB.next for i in a_values: if i in b_values: # O(1) amortized, O(n) worst. return i
""" @author: David Lei @since: 6/11/2017 https://www.hackerrank.com/challenges/find-the-merge-point-of-two-joined-linked-lists/problem Given two lists guaranteed to converge (have same value) find when this occurs. Return the data at the node where this occurs. Both pass :) """ def find_merge_node_better(headA, headB): a = headA b = headB while True: if a.data == b.data: return a.data a = a.next if a.next else headB b = b.next if b.next else headA def find_merge_node_naive(headA, headB): a_values = set() b_values = set() while headA: a_values.add(headA.data) head_a = headA.next while headB: b_values.add(headB.data) head_b = headB.next for i in a_values: if i in b_values: return i
# -*- coding: utf-8 -*- def check_tuple(values): x1,x2,x3 = values try: assert (2*x1 - 4*x2 -x3) == 1 assert (x1 - 3 * x2 + x3) == 1 assert (3*x1 - 5*x2 - 3*x3) == 1 except: return False return True a = (3,1,1,) b = (3,-1,1) c = (13,5,2) d = (13/2,5/2,2) e = (17,7,5) print(check_tuple(a)) print(check_tuple(b)) print(check_tuple(c)) print(check_tuple(d)) print(check_tuple(e))
def check_tuple(values): (x1, x2, x3) = values try: assert 2 * x1 - 4 * x2 - x3 == 1 assert x1 - 3 * x2 + x3 == 1 assert 3 * x1 - 5 * x2 - 3 * x3 == 1 except: return False return True a = (3, 1, 1) b = (3, -1, 1) c = (13, 5, 2) d = (13 / 2, 5 / 2, 2) e = (17, 7, 5) print(check_tuple(a)) print(check_tuple(b)) print(check_tuple(c)) print(check_tuple(d)) print(check_tuple(e))
''' Given a 2D board of characters and a word, find if the word exists in the grid. The word can be constructed from letters of sequentially adjacent cell, where "adjacent" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once. For example, given the following board: [ ['A','B','C','E'], ['S','F','C','S'], ['A','D','E','E'] ] exists(board, "ABCCED") returns true, exists(board, "SEE") returns true, exists(board, "ABCB") returns false. ''' DIRECTION = [(0, 1), (1, 0), (0, -1), (-1, 0)] class Board: def __init__(self, board): self.board = board self.N = len(board) self.M = len(board[0]) def _check_exists(self, string, item): if not string: return True is_found = False row, col = item if 0<=row<self.N and 0<=col<self.M \ and self.board[row][col]==string[0]: self.board[row][col] = (string[0],) for mov in DIRECTION: if self._check_exists(string[1:], (row+mov[0], col+mov[1])): is_found = True break self.board[row][col] = string[0] return is_found def exists(self, string): start_points = [] for i in range(self.N): for j in range(self.M): if self.board[i][j]==string[0]: start_points.append((i,j)) for item in start_points: if self._check_exists(string, item): return True return False if __name__ == "__main__": data = [ ["ABCCED", True], ["SEE", True], ["ABCB", False] ] board = Board( [ ['A','B','C','E'], ['S','F','C','S'], ['A','D','E','E'] ]) for d in data: print('input', d[0], 'output', board.exists(d[0]))
""" Given a 2D board of characters and a word, find if the word exists in the grid. The word can be constructed from letters of sequentially adjacent cell, where "adjacent" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once. For example, given the following board: [ ['A','B','C','E'], ['S','F','C','S'], ['A','D','E','E'] ] exists(board, "ABCCED") returns true, exists(board, "SEE") returns true, exists(board, "ABCB") returns false. """ direction = [(0, 1), (1, 0), (0, -1), (-1, 0)] class Board: def __init__(self, board): self.board = board self.N = len(board) self.M = len(board[0]) def _check_exists(self, string, item): if not string: return True is_found = False (row, col) = item if 0 <= row < self.N and 0 <= col < self.M and (self.board[row][col] == string[0]): self.board[row][col] = (string[0],) for mov in DIRECTION: if self._check_exists(string[1:], (row + mov[0], col + mov[1])): is_found = True break self.board[row][col] = string[0] return is_found def exists(self, string): start_points = [] for i in range(self.N): for j in range(self.M): if self.board[i][j] == string[0]: start_points.append((i, j)) for item in start_points: if self._check_exists(string, item): return True return False if __name__ == '__main__': data = [['ABCCED', True], ['SEE', True], ['ABCB', False]] board = board([['A', 'B', 'C', 'E'], ['S', 'F', 'C', 'S'], ['A', 'D', 'E', 'E']]) for d in data: print('input', d[0], 'output', board.exists(d[0]))
#Encrypt Function def encrypt(password): password_li = list(password) encrypted_password = "" for i in password_li: ascii_password_li = chr(ord(i) + 5) encrypted_password = encrypted_password + ascii_password_li print("Your encrypted password is: ") print(encrypted_password) #decrypt Function def decrypt(password): password_li = list(password) decrypted_password = "" for i in password_li: ascii_password_li = chr(ord(i) - 5) decrypted_password = decrypted_password + ascii_password_li print("Your decrypted password is: ") print(decrypted_password) option_selected = input("You want to encrypt or decrypt: \n 1: Encrypt \n 2: Decrypt \n") if int(option_selected )== 1 : password = input("Enter you password: ") encrypt(password) else: password = input("Enter you encrypted password which was encrypted by this program only: ") decrypt(password)
def encrypt(password): password_li = list(password) encrypted_password = '' for i in password_li: ascii_password_li = chr(ord(i) + 5) encrypted_password = encrypted_password + ascii_password_li print('Your encrypted password is: ') print(encrypted_password) def decrypt(password): password_li = list(password) decrypted_password = '' for i in password_li: ascii_password_li = chr(ord(i) - 5) decrypted_password = decrypted_password + ascii_password_li print('Your decrypted password is: ') print(decrypted_password) option_selected = input('You want to encrypt or decrypt: \n 1: Encrypt \n 2: Decrypt \n') if int(option_selected) == 1: password = input('Enter you password: ') encrypt(password) else: password = input('Enter you encrypted password which was encrypted by this program only: ') decrypt(password)
film_budget = float(input()) statists = int(input()) dress = float(input()) decor_price = 0.1*film_budget if statists > 150: dress_price = (statists*dress) - 0.1*(statists*dress) else: dress_price = statists*dress if decor_price + dress_price > film_budget: print('Not enough money!') print(f'Wingard needs{(dress_price + decor_price) - film_budget: .2f} leva more.') else: print('Action!') print(f'Wingard starts filming with{film_budget - (dress_price + decor_price): .2f} leva left.')
film_budget = float(input()) statists = int(input()) dress = float(input()) decor_price = 0.1 * film_budget if statists > 150: dress_price = statists * dress - 0.1 * (statists * dress) else: dress_price = statists * dress if decor_price + dress_price > film_budget: print('Not enough money!') print(f'Wingard needs{dress_price + decor_price - film_budget: .2f} leva more.') else: print('Action!') print(f'Wingard starts filming with{film_budget - (dress_price + decor_price): .2f} leva left.')
class UserQueryError(Exception): def __init__(self, msg): Exception.__init__(self, None) self.msg = msg def __str__(self): return 'User query error: %s' % self.msg
class Userqueryerror(Exception): def __init__(self, msg): Exception.__init__(self, None) self.msg = msg def __str__(self): return 'User query error: %s' % self.msg
while True: try: n = int(input()) while n != 0: n -= 1 s = int(input()) array1 = list(map(int, input().split()))[:s] array2 = list(input()) count = 0 for i in range(s): if array2[i] == "J" and array1[i] > 2: count += 1 elif array2[i] == "S" and (array1[i] == 1 or array1[i] == 2): count += 1 print(count) except EOFError: break
while True: try: n = int(input()) while n != 0: n -= 1 s = int(input()) array1 = list(map(int, input().split()))[:s] array2 = list(input()) count = 0 for i in range(s): if array2[i] == 'J' and array1[i] > 2: count += 1 elif array2[i] == 'S' and (array1[i] == 1 or array1[i] == 2): count += 1 print(count) except EOFError: break
class Image: def __init__(self): self.id = 0 self.name = "" self.thumbnail = "" self.url = "" self.upload_time = "" self.upload_user = "" @classmethod def convert_to_images(cls, entity_images): images = [] for entity_image in entity_images: image = Image() image.id = entity_image.id image.name = entity_image.name image.url = entity_image.url image.upload_time = entity_image.upload_time image.upload_user = entity_image.upload_user image.thumbnail = "/api/images/thumbnail{image.id}".format(**locals()) # NOQA images.append(image) return images
class Image: def __init__(self): self.id = 0 self.name = '' self.thumbnail = '' self.url = '' self.upload_time = '' self.upload_user = '' @classmethod def convert_to_images(cls, entity_images): images = [] for entity_image in entity_images: image = image() image.id = entity_image.id image.name = entity_image.name image.url = entity_image.url image.upload_time = entity_image.upload_time image.upload_user = entity_image.upload_user image.thumbnail = '/api/images/thumbnail{image.id}'.format(**locals()) images.append(image) return images
# v. 1.0 # 10.01.2018 # Sergii Mamedov summa = 0 for i in range(3, 1000): if (i % 3 == 0) or (i % 5 == 0): summa += i print(summa)
summa = 0 for i in range(3, 1000): if i % 3 == 0 or i % 5 == 0: summa += i print(summa)
# https://www.hackerrank.com/challenges/circular-array-rotation/problem # Complete the circularArrayRotation function below. def circularArrayRotation(a, k, queries): return [a[(q-k)%len(a)] for q in queries]
def circular_array_rotation(a, k, queries): return [a[(q - k) % len(a)] for q in queries]
#!/usr/bin/env python """ 6. Parse the following 'show ip bgp summary' output (see link below). From this output, extract the following fields: Neighbor, Remote AS, Up_Down, and State_PrefixRcvd. Also include the Local AS and the BGP Router ID in each row of the tabular output (hint: use filldown for this). Note, in order to simplify this problem only worry about the data shown in the output (in other words, don't worry about all possible values that could be present in the output). Second hint: remember there is an implicit 'EOF -> Record' at the end of the template (by default). https://github.com/ktbyers/pyplus_course/blob/master/class4/exercises/ex6_show_ip_bgp_summary.txt """
""" 6. Parse the following 'show ip bgp summary' output (see link below). From this output, extract the following fields: Neighbor, Remote AS, Up_Down, and State_PrefixRcvd. Also include the Local AS and the BGP Router ID in each row of the tabular output (hint: use filldown for this). Note, in order to simplify this problem only worry about the data shown in the output (in other words, don't worry about all possible values that could be present in the output). Second hint: remember there is an implicit 'EOF -> Record' at the end of the template (by default). https://github.com/ktbyers/pyplus_course/blob/master/class4/exercises/ex6_show_ip_bgp_summary.txt """
word = input("Enter a simple word : ") if word[::-1].upper() == word.upper(): print("This word is a palindrome") elif word[::-1].upper() != word.upper(): print("This word is not a palindrome... It prints", word[::-1].upper(), "when reversed.")
word = input('Enter a simple word : ') if word[::-1].upper() == word.upper(): print('This word is a palindrome') elif word[::-1].upper() != word.upper(): print('This word is not a palindrome... It prints', word[::-1].upper(), 'when reversed.')
def get_cheapest_cost(rootNode): result = float("inf") stack = [] stack.append((rootNode, rootNode.cost)) seen = set() seen.add(rootNode) while stack: current_node, path_sum = stack.pop() if len(current_node.children) == 0: if path_sum < result: result = path_sum else: for child in current_node.children: if child not in seen and path_sum + child.cost <= result: stack.append((child, path_sum + child.cost)) seen.add(child) return result ########################################## # Use the helper code below to implement # # and test your function above # ########################################## # A node class Node: # Constructor to create a new node def __init__(self, cost): self.cost = cost self.children = [] self.parent = None if __name__ == '__main__': root = Node(0) one = Node(5) two = Node(3) three = Node(6) four = Node(4) five = Node(2) six = Node(0) seven = Node(1) eight = Node(5) nine = Node(1) ten = Node(10) eleven = Node(1) root.children.extend([one, two, three]) one.children.append(four) two.children.extend([five, six]) three.children.extend([seven, eight]) five.children.append(nine) six.children.append(ten) nine.children.append(eleven) print(get_cheapest_cost(root))
def get_cheapest_cost(rootNode): result = float('inf') stack = [] stack.append((rootNode, rootNode.cost)) seen = set() seen.add(rootNode) while stack: (current_node, path_sum) = stack.pop() if len(current_node.children) == 0: if path_sum < result: result = path_sum else: for child in current_node.children: if child not in seen and path_sum + child.cost <= result: stack.append((child, path_sum + child.cost)) seen.add(child) return result class Node: def __init__(self, cost): self.cost = cost self.children = [] self.parent = None if __name__ == '__main__': root = node(0) one = node(5) two = node(3) three = node(6) four = node(4) five = node(2) six = node(0) seven = node(1) eight = node(5) nine = node(1) ten = node(10) eleven = node(1) root.children.extend([one, two, three]) one.children.append(four) two.children.extend([five, six]) three.children.extend([seven, eight]) five.children.append(nine) six.children.append(ten) nine.children.append(eleven) print(get_cheapest_cost(root))
"""Quick sort function.""" def quicksort(arr): """.""" if type(arr) is not list: raise TypeError('Input must be a list type.') for i in range(0, len(arr)): if not isinstance(arr[i], (int, str)): raise TypeError('All elements must be either an \ integer or string.') if len(arr) <= 1: return arr quicksort_helper(arr, 0, len(arr) - 1) return arr def quicksort_helper(arr, first, last): """.""" if first < last: split_point = partition(arr, first, last) quicksort_helper(arr, first, split_point - 1) quicksort_helper(arr, split_point + 1, last) def partition(arr, first, last): """.""" pivot_val = arr[first] leftmark = first + 1 rightmark = last done = False while not done: while leftmark <= rightmark and arr[leftmark] <= pivot_val: # print(f'first while: lm{leftmark}, rm{rightmark}, {arr[leftmark]}, {pivot_val}') leftmark += 1 while arr[rightmark] >= pivot_val and rightmark >= leftmark: # print(f'{arr[rightmark]}, {pivot_val}, rm{rightmark}, lm{leftmark}') rightmark -= 1 if rightmark < leftmark: done = True else: arr[leftmark], arr[rightmark] = arr[rightmark], arr[leftmark] arr[first], arr[rightmark] = arr[rightmark], arr[first] return rightmark
"""Quick sort function.""" def quicksort(arr): """.""" if type(arr) is not list: raise type_error('Input must be a list type.') for i in range(0, len(arr)): if not isinstance(arr[i], (int, str)): raise type_error('All elements must be either an integer or string.') if len(arr) <= 1: return arr quicksort_helper(arr, 0, len(arr) - 1) return arr def quicksort_helper(arr, first, last): """.""" if first < last: split_point = partition(arr, first, last) quicksort_helper(arr, first, split_point - 1) quicksort_helper(arr, split_point + 1, last) def partition(arr, first, last): """.""" pivot_val = arr[first] leftmark = first + 1 rightmark = last done = False while not done: while leftmark <= rightmark and arr[leftmark] <= pivot_val: leftmark += 1 while arr[rightmark] >= pivot_val and rightmark >= leftmark: rightmark -= 1 if rightmark < leftmark: done = True else: (arr[leftmark], arr[rightmark]) = (arr[rightmark], arr[leftmark]) (arr[first], arr[rightmark]) = (arr[rightmark], arr[first]) return rightmark
# -*- coding: utf-8 -*- class SessionHelper(): def __init__(self, app): self.app = app @property def logged_user_name(self): return self.app.wd.find_element_by_css_selector(".user-info") def open_homepage(self): wd = self.app.wd if wd.current_url.endswith("/mantisbt/my_view_page.php"): return wd.get(self.app.base_url) def login(self, username, password): self.open_homepage() self._type_and_submit_input("username", username) self._type_and_submit_input("password", password) def _type_and_submit_input(self, location, value): wd = self.app.wd wd.find_element_by_name(location).click() wd.find_element_by_name(location).clear() wd.find_element_by_name(location).send_keys(value) wd.find_element_by_css_selector("input[type='submit']").click() def ensure_login(self, username, password): if self.is_logged_in(): if self.is_logged_in_as(username): return self.logout() self.login(username, password) def logout(self): wd = self.app.wd logout_button = wd.find_element_by_css_selector("a[href*='logout_page.php']") if not logout_button.is_displayed(): self.logged_user_name.click() logout_button.click() def ensure_logout(self): if self.is_logged_in(): self.logout() def is_logged_in(self): wd = self.app.wd return len(wd.find_elements_by_css_selector("span.user-info")) > 0 def is_logged_in_as(self, username): wd = self.app.wd return self.get_logged_user() == username def get_logged_user(self): wd = self.app.wd return self.logged_user_name.text
class Sessionhelper: def __init__(self, app): self.app = app @property def logged_user_name(self): return self.app.wd.find_element_by_css_selector('.user-info') def open_homepage(self): wd = self.app.wd if wd.current_url.endswith('/mantisbt/my_view_page.php'): return wd.get(self.app.base_url) def login(self, username, password): self.open_homepage() self._type_and_submit_input('username', username) self._type_and_submit_input('password', password) def _type_and_submit_input(self, location, value): wd = self.app.wd wd.find_element_by_name(location).click() wd.find_element_by_name(location).clear() wd.find_element_by_name(location).send_keys(value) wd.find_element_by_css_selector("input[type='submit']").click() def ensure_login(self, username, password): if self.is_logged_in(): if self.is_logged_in_as(username): return self.logout() self.login(username, password) def logout(self): wd = self.app.wd logout_button = wd.find_element_by_css_selector("a[href*='logout_page.php']") if not logout_button.is_displayed(): self.logged_user_name.click() logout_button.click() def ensure_logout(self): if self.is_logged_in(): self.logout() def is_logged_in(self): wd = self.app.wd return len(wd.find_elements_by_css_selector('span.user-info')) > 0 def is_logged_in_as(self, username): wd = self.app.wd return self.get_logged_user() == username def get_logged_user(self): wd = self.app.wd return self.logged_user_name.text
''' URL: https://leetcode.com/problems/non-decreasing-array/ Difficulty: Medium Description: Non-decreasing Array Given an array nums with n integers, your task is to check if it could become non-decreasing by modifying at most 1 element. We define an array is non-decreasing if nums[i] <= nums[i + 1] holds for every i (0-based) such that (0 <= i <= n - 2). Example 1: Input: nums = [4,2,3] Output: true Explanation: You could modify the first 4 to 1 to get a non-decreasing array. Example 2: Input: nums = [4,2,1] Output: false Explanation: You can't get a non-decreasing array by modify at most one element. Constraints: 1 <= n <= 10 ^ 4 - 10 ^ 5 <= nums[i] <= 10 ^ 5 ''' class Solution: def checkSorted(self, arr): # check if array is sorted in ascending order for i in range(len(arr)-1): if arr[i] > arr[i+1]: return False return True def checkPossibility(self, nums): for i in range(len(nums)): if self.checkSorted(nums[:i] + nums[i+1:]): return True return False
""" URL: https://leetcode.com/problems/non-decreasing-array/ Difficulty: Medium Description: Non-decreasing Array Given an array nums with n integers, your task is to check if it could become non-decreasing by modifying at most 1 element. We define an array is non-decreasing if nums[i] <= nums[i + 1] holds for every i (0-based) such that (0 <= i <= n - 2). Example 1: Input: nums = [4,2,3] Output: true Explanation: You could modify the first 4 to 1 to get a non-decreasing array. Example 2: Input: nums = [4,2,1] Output: false Explanation: You can't get a non-decreasing array by modify at most one element. Constraints: 1 <= n <= 10 ^ 4 - 10 ^ 5 <= nums[i] <= 10 ^ 5 """ class Solution: def check_sorted(self, arr): for i in range(len(arr) - 1): if arr[i] > arr[i + 1]: return False return True def check_possibility(self, nums): for i in range(len(nums)): if self.checkSorted(nums[:i] + nums[i + 1:]): return True return False
class Solution: def matrixReshape(self, nums: List[List[int]], r: int, c: int) -> List[List[int]]: m = len(nums) n = len(nums[0]) if m * n != r * c: return nums ori = [nums[i][j] for i in range(m) for j in range(n)] ans = [] for i in range(0, len(ori), c): ans.append(ori[i:i+c]) return ans
class Solution: def matrix_reshape(self, nums: List[List[int]], r: int, c: int) -> List[List[int]]: m = len(nums) n = len(nums[0]) if m * n != r * c: return nums ori = [nums[i][j] for i in range(m) for j in range(n)] ans = [] for i in range(0, len(ori), c): ans.append(ori[i:i + c]) return ans
class Solution: def reachingPoints(self, sx: int, sy: int, tx: int, ty: int) -> bool: while sx <= tx and sy <= ty: if tx < ty: if sx == tx: return (ty - sy) % sx == 0 else: ty %= tx else: if sy == ty: return (tx - sx) % sy == 0 else: tx %= ty return False
class Solution: def reaching_points(self, sx: int, sy: int, tx: int, ty: int) -> bool: while sx <= tx and sy <= ty: if tx < ty: if sx == tx: return (ty - sy) % sx == 0 else: ty %= tx elif sy == ty: return (tx - sx) % sy == 0 else: tx %= ty return False
datasetFile = open("datasets/rosalind_ba1f.txt", "r") genome = datasetFile.readline().strip() print("Find a Position in a Genome Minimizing the Skew") def minSkew(genome): indices = [0] skew = 0 min = 0 for i in range(len(genome)): if genome[i] == 'G': skew += 1 elif genome[i] == 'C': skew -= 1 if skew < min: indices = [i + 1] min = skew elif skew == min: indices.append(i + 1) return indices solution = " ".join(map(lambda x: str(x), minSkew(genome))) outputFile = open("output/rosalind_ba1f.txt", "a") outputFile.write(solution)
dataset_file = open('datasets/rosalind_ba1f.txt', 'r') genome = datasetFile.readline().strip() print('Find a Position in a Genome Minimizing the Skew') def min_skew(genome): indices = [0] skew = 0 min = 0 for i in range(len(genome)): if genome[i] == 'G': skew += 1 elif genome[i] == 'C': skew -= 1 if skew < min: indices = [i + 1] min = skew elif skew == min: indices.append(i + 1) return indices solution = ' '.join(map(lambda x: str(x), min_skew(genome))) output_file = open('output/rosalind_ba1f.txt', 'a') outputFile.write(solution)
def double_pole_fitness_func(target_len, cart, net): def fitness_func(genes): net.init_weight(genes) return net.evaluate(cart, target_len) return fitness_func
def double_pole_fitness_func(target_len, cart, net): def fitness_func(genes): net.init_weight(genes) return net.evaluate(cart, target_len) return fitness_func
""" More simple if Statements Problem # 2 Write a program that inputs two numbers and find whether whether they are equal """ # num1 = input("Enter the first number: ") # 4 # num1 = int(num1) # num2 = input("Enter the second number: ") # 4 # num2 = int(num2) # if num1 == num2: # print("both numbers are equal") # both numbers are equal # num1 = input("Enter the first number: ") # 3 # num1 = int(num1) # num2 = input("Enter the second number: ") # 7 # num2 = int(num2) # if num1 == num2: # print("both numbers are equal") # nothing was printed """ Problem # 3 Write a program that inputs marks of three subjects. If the average of the three marks is more than 80, it displays two messages "You are above standard!" and "Admission granted!" Pseudocode 1. Start 2. Take marks from user 3. If marks are greater than 80 1. Display the message "Write a program that inputs marks of three subjects. If the average of the three marks is more than 80, it displays two messages "You are above standard!" and "Admission granted!" 4. End """ # take input from the user subject1 = input("Please enter the marks of Subject 1: ") subject1 = float(subject1) subject2 = input("Please enter the marks of Subject 2: ") subject2 = float(subject2) subject3 = input("Please enter the marks of Subject 3: ") subject3 = float(subject3) Sum = subject1 + subject2 + subject3 avg = Sum / 3 if avg > 80: print("You are above standard!") print("Admission granted!") # Please enter the marks of Subject 1: 80 # Please enter the marks of Subject 2: 85 # Please enter the marks of Subject 3: 78 # You are above standard! # Admission granted!
""" More simple if Statements Problem # 2 Write a program that inputs two numbers and find whether whether they are equal """ ' Problem # 3\nWrite a program that inputs marks of three subjects. If the average of the three marks is more than 80, it displays two messages "You are above standard!" and "Admission granted!"\n\n\n Pseudocode\n 1. Start\n 2. Take marks from user\n 3. If marks are greater than 80\n 1. Display the message "Write a program that inputs marks of three subjects. If the average of the three marks is more than 80, it displays two messages "You are above standard!" and "Admission granted!"\n\n 4. End\n \n \n' subject1 = input('Please enter the marks of Subject 1: ') subject1 = float(subject1) subject2 = input('Please enter the marks of Subject 2: ') subject2 = float(subject2) subject3 = input('Please enter the marks of Subject 3: ') subject3 = float(subject3) sum = subject1 + subject2 + subject3 avg = Sum / 3 if avg > 80: print('You are above standard!') print('Admission granted!')
corpus = open('corpuslimpio.txt','r',encoding='utf8') pos = open('pos.txt','w',encoding='utf8') neg = open('neg.txt','w',encoding='utf8') for line in corpus.readlines(): if line[0] == 'P': pos.write(line[2:]) elif line[0] == 'N': neg.write(line[2:]) corpus.close() pos.close() neg.close()
corpus = open('corpuslimpio.txt', 'r', encoding='utf8') pos = open('pos.txt', 'w', encoding='utf8') neg = open('neg.txt', 'w', encoding='utf8') for line in corpus.readlines(): if line[0] == 'P': pos.write(line[2:]) elif line[0] == 'N': neg.write(line[2:]) corpus.close() pos.close() neg.close()
class Node: def __init__(self, value): self.value = value self.left = None self.right = None class BinarySearchTree: def __init__(self): self.root = None def insert(self, value): if self.root is None: self.root = Node(value) else: self._insert(value, self.root) def _insert(self, value, currentNode): if currentNode.value > value: if currentNode.left is None: currentNode.left = Node(value) else: self._insert(value, currentNode.left) elif currentNode.value < value: if currentNode.right is None: currentNode.right = Node(value) else: self._insert(value, currentNode.right) else: print("Node already in tree.") def printInorder(self): level = 0 if self.root: self._printInorder(self.root, level) def printPreorder(self): level = 0 if self.root: self._printPreorder(self.root, level) def printPostorder(self): level = 0 if self.root: self._printPostorder(self.root, level) def _printInorder(self, node, level): if node: self._printInorder(node.left, level + 1) print(' ' * level + str(node.value)) self._printInorder(node.right, level + 1) def _printPreorder(self, node, level): if node: print(' ' * level + str(node.value)) self._printPreorder(node.left, level + 1) self._printPreorder(node.right, level + 1) def _printPostorder(self, node, level): if node: self._printPostorder(node.left, level + 1) self._printPostorder(node.right, level + 1) print(' ' * level + str(node.value))
class Node: def __init__(self, value): self.value = value self.left = None self.right = None class Binarysearchtree: def __init__(self): self.root = None def insert(self, value): if self.root is None: self.root = node(value) else: self._insert(value, self.root) def _insert(self, value, currentNode): if currentNode.value > value: if currentNode.left is None: currentNode.left = node(value) else: self._insert(value, currentNode.left) elif currentNode.value < value: if currentNode.right is None: currentNode.right = node(value) else: self._insert(value, currentNode.right) else: print('Node already in tree.') def print_inorder(self): level = 0 if self.root: self._printInorder(self.root, level) def print_preorder(self): level = 0 if self.root: self._printPreorder(self.root, level) def print_postorder(self): level = 0 if self.root: self._printPostorder(self.root, level) def _print_inorder(self, node, level): if node: self._printInorder(node.left, level + 1) print(' ' * level + str(node.value)) self._printInorder(node.right, level + 1) def _print_preorder(self, node, level): if node: print(' ' * level + str(node.value)) self._printPreorder(node.left, level + 1) self._printPreorder(node.right, level + 1) def _print_postorder(self, node, level): if node: self._printPostorder(node.left, level + 1) self._printPostorder(node.right, level + 1) print(' ' * level + str(node.value))
# Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right class Solution: def tree2str(self, root: TreeNode) -> str: if not root: return '' s = f'{root.val}' l = self.tree2str(root.left) r = self.tree2str(root.right) if l or r: s += f'({l})' if r: s += f'({r})' return s
class Solution: def tree2str(self, root: TreeNode) -> str: if not root: return '' s = f'{root.val}' l = self.tree2str(root.left) r = self.tree2str(root.right) if l or r: s += f'({l})' if r: s += f'({r})' return s
""" You are playing the following Bulls and Cows game with your friend: You write down a number and ask your friend to guess what the number is. Each time your friend makes a guess, you provide a hint that indicates how many digits in said guess match your secret number exactly in both digit and position (called "bulls") and how many digits match the secret number but locate in the wrong position (called "cows"). Your friend will use successive guesses and hints to eventually derive the secret number. For example: Secret number: "1807" Friend's guess: "7810" Hint: 1 bull and 3 cows. (The bull is 8, the cows are 0, 1 and 7.) Write a function to return a hint according to the secret number and friend's guess, use A to indicate the bulls and B to indicate the cows. In the above example, your function should return "1A3B". Please note that both secret number and friend's guess may contain duplicate digits, for example: Secret number: "1123" Friend's guess: "0111" In this case, the 1st 1 in friend's guess is a bull, the 2nd or 3rd 1 is a cow, and your function should return "1A1B". You may assume that the secret number and your friend's guess only contain digits, and their lengths are always equal. Credits: Special thanks to @jeantimex for adding this problem and creating all test cases. """ class Solution(object): def getHint(self, secret, guess): """ :type secret: str :type guess: str :rtype: str """ secDigits=[int(i) for i in secret] gesDigits=[int(i) for i in guess] bull=0 cow=0 for i,j in enumerate(gesDigits): if secDigits[i]==j: bull+=1 gesDigits[i]=':' secDigits[i]='.' for i,j in enumerate(gesDigits): if j in secDigits: cow+=1 gesDigits[i]=':' secDigits[secDigits.index(j)]='.' return str(bull)+'A'+str(cow)+'B' """ faster and much faster version from http://bookshadow.com/weblog/2015/10/31/leetcode-bulls-and-cows/ class Solution(object): def getHint(self, secret, guess): bull = sum(map(operator.eq, secret, guess)) sa = collections.Counter(secret) sb = collections.Counter(guess) cow = sum((sa & sb).values()) - bull return str(bull) + 'A' + str(cow) + 'B' class Solution(object): def getHint(self, secret, guess): bulls = sum(map(operator.eq, secret, guess)) both = sum(min(secret.count(x), guess.count(x)) for x in '0123456789') return '%dA%dB' % (bulls, both - bulls) """
""" You are playing the following Bulls and Cows game with your friend: You write down a number and ask your friend to guess what the number is. Each time your friend makes a guess, you provide a hint that indicates how many digits in said guess match your secret number exactly in both digit and position (called "bulls") and how many digits match the secret number but locate in the wrong position (called "cows"). Your friend will use successive guesses and hints to eventually derive the secret number. For example: Secret number: "1807" Friend's guess: "7810" Hint: 1 bull and 3 cows. (The bull is 8, the cows are 0, 1 and 7.) Write a function to return a hint according to the secret number and friend's guess, use A to indicate the bulls and B to indicate the cows. In the above example, your function should return "1A3B". Please note that both secret number and friend's guess may contain duplicate digits, for example: Secret number: "1123" Friend's guess: "0111" In this case, the 1st 1 in friend's guess is a bull, the 2nd or 3rd 1 is a cow, and your function should return "1A1B". You may assume that the secret number and your friend's guess only contain digits, and their lengths are always equal. Credits: Special thanks to @jeantimex for adding this problem and creating all test cases. """ class Solution(object): def get_hint(self, secret, guess): """ :type secret: str :type guess: str :rtype: str """ sec_digits = [int(i) for i in secret] ges_digits = [int(i) for i in guess] bull = 0 cow = 0 for (i, j) in enumerate(gesDigits): if secDigits[i] == j: bull += 1 gesDigits[i] = ':' secDigits[i] = '.' for (i, j) in enumerate(gesDigits): if j in secDigits: cow += 1 gesDigits[i] = ':' secDigits[secDigits.index(j)] = '.' return str(bull) + 'A' + str(cow) + 'B' "\nfaster and much faster version from\nhttp://bookshadow.com/weblog/2015/10/31/leetcode-bulls-and-cows/\n\nclass Solution(object):\n def getHint(self, secret, guess):\n bull = sum(map(operator.eq, secret, guess))\n sa = collections.Counter(secret)\n sb = collections.Counter(guess)\n cow = sum((sa & sb).values()) - bull\n return str(bull) + 'A' + str(cow) + 'B'\n\nclass Solution(object):\n def getHint(self, secret, guess):\n bulls = sum(map(operator.eq, secret, guess))\n both = sum(min(secret.count(x), guess.count(x)) for x in '0123456789')\n return '%dA%dB' % (bulls, both - bulls)\n"
cust="customer1" my_age=int(input("Hello {}, Please confirm your age:".format(cust))) if my_age<18: print("you are minor, you have no entry") if my_age<18:print("you are minor, you have no entry")
cust = 'customer1' my_age = int(input('Hello {}, Please confirm your age:'.format(cust))) if my_age < 18: print('you are minor, you have no entry') if my_age < 18: print('you are minor, you have no entry')
p = int(input()) q = int(input()) result=[] for i in range(p,q+1,1): k = i**2 r = k % (10 ** len(str(i))) l = k // (10 ** len(str(i))) if i == int(r)+int(l): result.append(i) result= list(set(result)) result.sort() if len(result) == 0: print ('INVALID RANGE') else: print (' '.join([str(i) for i in result]))
p = int(input()) q = int(input()) result = [] for i in range(p, q + 1, 1): k = i ** 2 r = k % 10 ** len(str(i)) l = k // 10 ** len(str(i)) if i == int(r) + int(l): result.append(i) result = list(set(result)) result.sort() if len(result) == 0: print('INVALID RANGE') else: print(' '.join([str(i) for i in result]))
C_ANY = 0 # Any length C_ONE = 2 # Only one C_DEF = 4 # Up to 5 C_SAR = 8 # Up to 7 (shifts and rotates) END = 0x60 LDA = 0xA9 LDX = 0xA2 LDY = 0xA0 BIT = 0x24 CMP = 0xC9 CPX = 0xE0 CPY = 0xC0 OPCODES = { 0x00: (1, C_ANY, 0x00, 'BRK'), 0x01: (2, C_ANY, 0x01, 'ORA (nn,X)'), 0x05: (2, C_ANY, 0x02, 'ORA nn'), 0x06: (2, C_SAR, 0x06, 'ASL nn'), 0x08: (1, C_DEF, 0x08, 'PHP'), 0x09: (2, C_ONE, 0x09, 'ORA #nn'), 0x0A: (1, C_SAR, 0x0A, 'ASL A'), 0x0D: (3, C_ANY, 0x0D, 'ORA nnnn'), 0x0E: (3, C_SAR, 0x0E, 'ASL nnnn'), 0x10: (2, C_ONE, 0x10, 'BPL nnnn'), 0x11: (2, C_ANY, 0x11, 'ORA (nn),Y'), 0x15: (2, C_ANY, 0x15, 'ORA nn,X'), 0x16: (2, C_SAR, 0x16, 'ASL nn,X'), 0x18: (1, C_ONE, 0x18, 'CLC'), 0x19: (3, C_ANY, 0x19, 'ORA nnnn,Y'), 0x1D: (3, C_ANY, 0x1D, 'ORA nnnn,X'), 0x1E: (3, C_SAR, 0x1E, 'ASL nnnn,X'), 0x20: (3, C_ANY, 0x20, 'JSR nnnn'), 0x21: (2, C_ANY, 0x21, 'AND (nn,X)'), 0x24: (2, C_ONE, BIT, 'BIT nn'), 0x25: (2, C_ANY, 0x25, 'AND nn'), 0x26: (2, C_SAR, 0x26, 'ROL nn'), 0x28: (1, C_ONE, 0x28, 'PLP'), 0x29: (2, C_ONE, 0x29, 'AND #nn'), 0x2A: (1, C_SAR, 0x2A, 'ROL A'), 0x2C: (3, C_ONE, BIT, 'BIT nnnn'), 0x2D: (3, C_ANY, 0x2D, 'AND nnnn'), 0x2E: (3, C_SAR, 0x2E, 'ROL nnnn'), 0x30: (2, C_ONE, 0x30, 'BMI nnnn'), 0x31: (2, C_ANY, 0x31, 'AND (nn),Y'), 0x35: (2, C_ANY, 0x35, 'AND nn,X'), 0x36: (2, C_SAR, 0x36, 'ROL nn,X'), 0x38: (1, C_ONE, 0x38, 'SEC'), 0x39: (3, C_ANY, 0x39, 'AND nnnn,Y'), 0x3D: (3, C_ANY, 0x3D, 'AND nnnn,X'), 0x3E: (3, C_SAR, 0x3E, 'ROL nnnn,X'), 0x40: (1, C_ONE, END, 'RTI'), 0x41: (2, C_ANY, 0x41, 'EOR (nn,X)'), 0x45: (2, C_ANY, 0x45, 'EOR nn'), 0x46: (2, C_SAR, 0x46, 'LSR nn'), 0x48: (1, C_DEF, 0x48, 'PHA'), 0x49: (2, C_ONE, 0x49, 'EOR #nn'), 0x4A: (1, C_SAR, 0x4A, 'LSR A'), 0x4C: (3, C_ONE, END, 'JMP nnnn'), 0x4D: (3, C_ANY, 0x4D, 'EOR nnnn'), 0x4E: (3, C_SAR, 0x4E, 'LSR nnnn'), 0x50: (2, C_ONE, 0x50, 'BVC nnnn'), 0x51: (2, C_ANY, 0x51, 'EOR (nn),Y'), 0x55: (2, C_ANY, 0x55, 'EOR nn,X'), 0x56: (2, C_SAR, 0x56, 'LSR nn,X'), 0x58: (1, C_ONE, 0x58, 'CLI'), 0x59: (3, C_ANY, 0x59, 'EOR nnnn,Y'), 0x5D: (3, C_ANY, 0x5D, 'EOR nnnn,X'), 0x5E: (3, C_SAR, 0x5E, 'LSR nnnn,X'), 0x60: (1, C_ONE, END, 'RTS'), 0x61: (2, C_ANY, 0x61, 'ADC (nn,X)'), 0x65: (2, C_ANY, 0x65, 'ADC nn'), 0x66: (2, C_SAR, 0x66, 'ROR nn'), 0x68: (1, C_ONE, 0x68, 'PLA'), 0x69: (2, C_ONE, 0x69, 'ADC #nn'), 0x6A: (1, C_SAR, 0x6A, 'ROR A'), 0x6C: (3, C_ONE, END, 'JMP (nnnn)'), 0x6D: (3, C_ANY, 0x6D, 'ADC nnnn'), 0x6E: (3, C_SAR, 0x6E, 'ROR nnnn'), 0x70: (2, C_ONE, 0x70, 'BVS nnnn'), 0x71: (2, C_ANY, 0x71, 'ADC (nn),Y'), 0x75: (2, C_ANY, 0x75, 'ADC nn,X'), 0x76: (2, C_SAR, 0x76, 'ROR nn,X'), 0x78: (1, C_ONE, 0x78, 'SEI'), 0x79: (3, C_ANY, 0x79, 'ADC nnnn,Y'), 0x7D: (3, C_ANY, 0x7D, 'ADC nnnn,X'), 0x7E: (3, C_SAR, 0x7E, 'ROR nnnn,X'), 0x81: (2, C_ANY, 0x81, 'STA (nn,X)'), 0x84: (2, C_ANY, 0x84, 'STY nn'), 0x85: (2, C_ANY, 0x85, 'STA nn'), 0x86: (2, C_ANY, 0x86, 'STX nn'), 0x88: (1, C_ANY, 0x88, 'DEY'), 0x8A: (1, C_ONE, 0x8A, 'TXA'), 0x8C: (3, C_ANY, 0x8C, 'STY nnnn'), 0x8D: (3, C_ANY, 0x8D, 'STA nnnn'), 0x8E: (3, C_ANY, 0x8E, 'STX nnnn'), 0x90: (2, C_ONE, 0x90, 'BCC nnnn'), 0x91: (2, C_ANY, 0x91, 'STA (nn),Y'), 0x94: (2, C_ANY, 0x94, 'STY nn,X'), 0x95: (2, C_ANY, 0x95, 'STA nn,X'), 0x96: (2, C_ANY, 0x96, 'STX nn,Y'), 0x98: (1, C_ONE, 0x98, 'TYA'), 0x99: (3, C_ANY, 0x99, 'STA nnnn,Y'), 0x9A: (1, C_ONE, 0x9A, 'TXS'), 0x9D: (3, C_ANY, 0x9D, 'STA nnnn,X'), 0xA0: (2, C_ONE, LDY, 'LDY #nn'), 0xA1: (2, C_ONE, LDA, 'LDA (nn,X)'), 0xA2: (2, C_ONE, LDX, 'LDX #nn'), 0xA4: (2, C_ONE, LDY, 'LDY nn'), 0xA5: (2, C_ONE, LDA, 'LDA nn'), 0xA6: (2, C_ONE, LDX, 'LDX nn'), 0xA8: (1, C_ONE, 0xA8, 'TAY'), 0xA9: (2, C_ONE, LDA, 'LDA #nn'), 0xAA: (1, C_ONE, 0xAA, 'TAX'), 0xAC: (3, C_ONE, LDY, 'LDY nnnn'), 0xAD: (3, C_ONE, LDA, 'LDA nnnn'), 0xAE: (3, C_ONE, LDX, 'LDX nnnn'), 0xB0: (2, C_ONE, 0xB0, 'BCS nnnn'), 0xB1: (2, C_ONE, LDA, 'LDA (nn),Y'), 0xB4: (2, C_ONE, LDY, 'LDY nn,X'), 0xB5: (2, C_ONE, LDA, 'LDA nn,X'), 0xB6: (2, C_ONE, LDX, 'LDX nn,Y'), 0xB8: (1, C_ONE, 0xB8, 'CLV'), 0xB9: (3, C_ONE, LDA, 'LDA nnnn,Y'), 0xBA: (1, C_ONE, 0xBA, 'TSX'), 0xBC: (3, C_ONE, LDY, 'LDY nnnn,X'), 0xBD: (3, C_ONE, LDA, 'LDA nnnn,X'), 0xBE: (3, C_ONE, LDX, 'LDX nnnn,Y'), 0xC0: (2, C_ONE, CPY, 'CPY #nn'), 0xC1: (2, C_ONE, CMP, 'CMP (nn,X)'), 0xC4: (2, C_ONE, CPY, 'CPY nn'), 0xC5: (2, C_ONE, CMP, 'CMP nn'), 0xC6: (2, C_ANY, 0xC6, 'DEC nn'), 0xC8: (1, C_ANY, 0xC8, 'INY'), 0xC9: (2, C_ONE, CMP, 'CMP #nn'), 0xCA: (1, C_ANY, 0xCA, 'DEX'), 0xCC: (3, C_ONE, CPY, 'CPY nnnn'), 0xCD: (3, C_ONE, CMP, 'CMP nnnn'), 0xCE: (3, C_ANY, 0xCE, 'DEC nnnn'), 0xD0: (2, C_ONE, 0xD0, 'BNE nnnn'), 0xD1: (2, C_ONE, CMP, 'CMP (nn),Y'), 0xD5: (2, C_ONE, CMP, 'CMP nn,X'), 0xD6: (2, C_ANY, 0xD6, 'DEC nn,X'), 0xD8: (1, C_ONE, 0xD8, 'CLD'), 0xD9: (3, C_ONE, CMP, 'CMP nnnn,Y'), 0xDD: (3, C_ONE, CMP, 'CMP nnnn,X'), 0xDE: (3, C_ANY, 0xDE, 'DEC nnnn,X'), 0xE0: (2, C_ONE, CPX, 'CPX #nn'), 0xE1: (2, C_ANY, 0xE1, 'SBC (nn,X)'), 0xE4: (2, C_ONE, CPX, 'CPX nn'), 0xE5: (2, C_ANY, 0xE5, 'SBC nn'), 0xE6: (2, C_ANY, 0xE6, 'INC nn'), 0xE8: (1, C_ANY, 0xE8, 'INX'), 0xE9: (2, C_ONE, 0xE9, 'SBC #nn'), 0xEA: (1, C_ANY, 0xEA, 'NOP'), 0xEC: (3, C_ONE, CPX, 'CPX nnnn'), 0xED: (3, C_ANY, 0xED, 'SBC nnnn'), 0xEE: (3, C_ANY, 0xEE, 'INC nnnn'), 0xF0: (2, C_ONE, 0xF0, 'BEQ nnnn'), 0xF1: (2, C_ANY, 0xF1, 'SBC (nn),Y'), 0xF5: (2, C_ANY, 0xF5, 'SBC nn,X'), 0xF6: (2, C_ANY, 0xF6, 'INC nn,X'), 0xF8: (1, C_ONE, 0xF8, 'SED'), 0xF9: (3, C_ANY, 0xF9, 'SBC nnnn,Y'), 0xFD: (3, C_ANY, 0xFD, 'SBC nnnn,X'), 0xFE: (3, C_ANY, 0xFE, 'INC nnnn,X') } OP_SIZES = { 0x00: 2, 0x03: 2, 0x04: 2, 0x07: 2, 0x0B: 2, 0x0C: 3, 0x0F: 3, 0x11: 2, 0x13: 2, 0x14: 2, 0x17: 2, 0x1B: 3, 0x1C: 3, 0x1F: 3, 0x80: 2, 0x82: 2, 0x83: 2, 0x87: 2, 0x89: 2, 0x8B: 2, 0x8F: 3, 0x93: 2, 0x94: 2, 0x97: 2, 0x9B: 3, 0x9C: 3, 0x9E: 3, 0x9F: 3 } def _byte(snapshot, addr, size): op_id = 0 for i in range(addr, addr + size): op_id = 256 * op_id + snapshot[i] operation = '.BYTE ' + ','.join(str(v) for v in snapshot[addr:addr + size]) return size, 1, op_id, operation def _opcode(snapshot, addr, value): try: size, max_count, op_id, operation = OPCODES[value] except KeyError: size = OP_SIZES.get(value & 0x9F, 1) return _byte(snapshot, addr, min(size, 65536 - addr)) if addr + size < 65537: return size, max_count, op_id, operation return _byte(snapshot, addr, 65536 - addr) def _decode(snapshot, start, end): addr = start while addr < end: value = snapshot[addr] size, max_count, op_id, operation = _opcode(snapshot, addr, value) yield (addr, size, max_count, op_id, operation) addr += size def _check_text(t_blocks, t_start, t_end, text, min_length, words): if len(text) >= min_length: if words: t_lower = text.lower() for word in words: if word in t_lower: break else: return t_blocks.append((t_start, t_end)) def _get_text_blocks(snapshot, start, end, config, data=True): if data: min_length = config.text_min_length_data else: min_length = config.text_min_length_code t_blocks = [] if end - start >= min_length: text = '' for address in range(start, end): char = chr(snapshot[address]) if char in config.text_chars: if not text: t_start = address text += char elif text: _check_text(t_blocks, t_start, address, text, min_length, config.words) text = '' if text: _check_text(t_blocks, t_start, end, text, min_length, config.words) return t_blocks def _catch_data(ctls, ctl_addr, count, max_count, addr, op_bytes): if count >= max_count > 0: if not ctls or ctls[-1][1] != 'b': ctls.append((ctl_addr, 'b')) return addr return ctl_addr def _get_operation(operations, snapshot, addr): if addr not in operations: operations[addr] = next(_decode(snapshot, addr, addr + 1))[1::3] return operations[addr] def _generate_ctls_without_code_map(snapshot, start, end, config): operations = {} ctls = [] ctl_addr = start prev_max_count, prev_op_id, prev_op, prev_op_bytes = 0, None, None, () count = 1 for addr, size, max_count, op_id, operation in _decode(snapshot, start, end): operations[addr] = (size, operation) op_bytes = snapshot[addr:addr + size] if op_id == END: # Catch data-like sequences that precede a terminal instruction ctl_addr = _catch_data(ctls, ctl_addr, count, prev_max_count, addr, prev_op_bytes) ctls.append((ctl_addr, 'c')) ctl_addr = addr + size prev_max_count, prev_op_id, prev_op, prev_op_bytes = 0, None, None, () count = 1 continue if op_id == prev_op_id: count += 1 elif prev_op: ctl_addr = _catch_data(ctls, ctl_addr, count, prev_max_count, addr, prev_op_bytes) count = 1 prev_max_count, prev_op_id, prev_op, prev_op_bytes = max_count, op_id, operation, op_bytes if not ctls: ctls.append((ctl_addr, 'b')) ctls.append((end, 'i')) ctls = dict(ctls) # Join two adjacent blocks if the first one is code and branches or jumps # to the second edges = sorted(ctls) while 1: done = True while len(edges) > 1: addr, end = edges[0], edges[1] if ctls[addr] == 'c': while addr < end: size, operation = _get_operation(operations, snapshot, addr) if operation.startswith(('BC', 'BE', 'BM', 'BN', 'BP', 'BV')) or (snapshot[addr] == 0x4C and len(snapshot) > addr + 2): if snapshot[addr] == 0x4C: op_addr = snapshot[addr + 1] + 256 * snapshot[addr + 2] else: operand = snapshot[addr + 1] op_addr = addr + 2 + (operand if operand < 128 else operand - 256) if op_addr == end: del ctls[end], edges[1] done = False break addr += size if not done: break del edges[0] else: del edges[0] if done: break # Look for text edges = sorted(ctls) for i in range(len(edges) - 1): start, end = edges[i], edges[i + 1] ctl = ctls[start] text_blocks = _get_text_blocks(snapshot, start, end, config, ctl == 'b') if text_blocks: ctls[start] = 'b' for t_start, t_end in text_blocks: ctls[t_start] = 't' if t_end < end: ctls[t_end] = 'b' if t_end < end: addr = t_end while addr < end: addr += _get_operation(operations, snapshot, addr)[0] ctls[t_end] = ctl if addr == end else 'b' return ctls def generate_ctls(snapshot, start, end, code_map, config): return _generate_ctls_without_code_map(snapshot, start, end, config)
c_any = 0 c_one = 2 c_def = 4 c_sar = 8 end = 96 lda = 169 ldx = 162 ldy = 160 bit = 36 cmp = 201 cpx = 224 cpy = 192 opcodes = {0: (1, C_ANY, 0, 'BRK'), 1: (2, C_ANY, 1, 'ORA (nn,X)'), 5: (2, C_ANY, 2, 'ORA nn'), 6: (2, C_SAR, 6, 'ASL nn'), 8: (1, C_DEF, 8, 'PHP'), 9: (2, C_ONE, 9, 'ORA #nn'), 10: (1, C_SAR, 10, 'ASL A'), 13: (3, C_ANY, 13, 'ORA nnnn'), 14: (3, C_SAR, 14, 'ASL nnnn'), 16: (2, C_ONE, 16, 'BPL nnnn'), 17: (2, C_ANY, 17, 'ORA (nn),Y'), 21: (2, C_ANY, 21, 'ORA nn,X'), 22: (2, C_SAR, 22, 'ASL nn,X'), 24: (1, C_ONE, 24, 'CLC'), 25: (3, C_ANY, 25, 'ORA nnnn,Y'), 29: (3, C_ANY, 29, 'ORA nnnn,X'), 30: (3, C_SAR, 30, 'ASL nnnn,X'), 32: (3, C_ANY, 32, 'JSR nnnn'), 33: (2, C_ANY, 33, 'AND (nn,X)'), 36: (2, C_ONE, BIT, 'BIT nn'), 37: (2, C_ANY, 37, 'AND nn'), 38: (2, C_SAR, 38, 'ROL nn'), 40: (1, C_ONE, 40, 'PLP'), 41: (2, C_ONE, 41, 'AND #nn'), 42: (1, C_SAR, 42, 'ROL A'), 44: (3, C_ONE, BIT, 'BIT nnnn'), 45: (3, C_ANY, 45, 'AND nnnn'), 46: (3, C_SAR, 46, 'ROL nnnn'), 48: (2, C_ONE, 48, 'BMI nnnn'), 49: (2, C_ANY, 49, 'AND (nn),Y'), 53: (2, C_ANY, 53, 'AND nn,X'), 54: (2, C_SAR, 54, 'ROL nn,X'), 56: (1, C_ONE, 56, 'SEC'), 57: (3, C_ANY, 57, 'AND nnnn,Y'), 61: (3, C_ANY, 61, 'AND nnnn,X'), 62: (3, C_SAR, 62, 'ROL nnnn,X'), 64: (1, C_ONE, END, 'RTI'), 65: (2, C_ANY, 65, 'EOR (nn,X)'), 69: (2, C_ANY, 69, 'EOR nn'), 70: (2, C_SAR, 70, 'LSR nn'), 72: (1, C_DEF, 72, 'PHA'), 73: (2, C_ONE, 73, 'EOR #nn'), 74: (1, C_SAR, 74, 'LSR A'), 76: (3, C_ONE, END, 'JMP nnnn'), 77: (3, C_ANY, 77, 'EOR nnnn'), 78: (3, C_SAR, 78, 'LSR nnnn'), 80: (2, C_ONE, 80, 'BVC nnnn'), 81: (2, C_ANY, 81, 'EOR (nn),Y'), 85: (2, C_ANY, 85, 'EOR nn,X'), 86: (2, C_SAR, 86, 'LSR nn,X'), 88: (1, C_ONE, 88, 'CLI'), 89: (3, C_ANY, 89, 'EOR nnnn,Y'), 93: (3, C_ANY, 93, 'EOR nnnn,X'), 94: (3, C_SAR, 94, 'LSR nnnn,X'), 96: (1, C_ONE, END, 'RTS'), 97: (2, C_ANY, 97, 'ADC (nn,X)'), 101: (2, C_ANY, 101, 'ADC nn'), 102: (2, C_SAR, 102, 'ROR nn'), 104: (1, C_ONE, 104, 'PLA'), 105: (2, C_ONE, 105, 'ADC #nn'), 106: (1, C_SAR, 106, 'ROR A'), 108: (3, C_ONE, END, 'JMP (nnnn)'), 109: (3, C_ANY, 109, 'ADC nnnn'), 110: (3, C_SAR, 110, 'ROR nnnn'), 112: (2, C_ONE, 112, 'BVS nnnn'), 113: (2, C_ANY, 113, 'ADC (nn),Y'), 117: (2, C_ANY, 117, 'ADC nn,X'), 118: (2, C_SAR, 118, 'ROR nn,X'), 120: (1, C_ONE, 120, 'SEI'), 121: (3, C_ANY, 121, 'ADC nnnn,Y'), 125: (3, C_ANY, 125, 'ADC nnnn,X'), 126: (3, C_SAR, 126, 'ROR nnnn,X'), 129: (2, C_ANY, 129, 'STA (nn,X)'), 132: (2, C_ANY, 132, 'STY nn'), 133: (2, C_ANY, 133, 'STA nn'), 134: (2, C_ANY, 134, 'STX nn'), 136: (1, C_ANY, 136, 'DEY'), 138: (1, C_ONE, 138, 'TXA'), 140: (3, C_ANY, 140, 'STY nnnn'), 141: (3, C_ANY, 141, 'STA nnnn'), 142: (3, C_ANY, 142, 'STX nnnn'), 144: (2, C_ONE, 144, 'BCC nnnn'), 145: (2, C_ANY, 145, 'STA (nn),Y'), 148: (2, C_ANY, 148, 'STY nn,X'), 149: (2, C_ANY, 149, 'STA nn,X'), 150: (2, C_ANY, 150, 'STX nn,Y'), 152: (1, C_ONE, 152, 'TYA'), 153: (3, C_ANY, 153, 'STA nnnn,Y'), 154: (1, C_ONE, 154, 'TXS'), 157: (3, C_ANY, 157, 'STA nnnn,X'), 160: (2, C_ONE, LDY, 'LDY #nn'), 161: (2, C_ONE, LDA, 'LDA (nn,X)'), 162: (2, C_ONE, LDX, 'LDX #nn'), 164: (2, C_ONE, LDY, 'LDY nn'), 165: (2, C_ONE, LDA, 'LDA nn'), 166: (2, C_ONE, LDX, 'LDX nn'), 168: (1, C_ONE, 168, 'TAY'), 169: (2, C_ONE, LDA, 'LDA #nn'), 170: (1, C_ONE, 170, 'TAX'), 172: (3, C_ONE, LDY, 'LDY nnnn'), 173: (3, C_ONE, LDA, 'LDA nnnn'), 174: (3, C_ONE, LDX, 'LDX nnnn'), 176: (2, C_ONE, 176, 'BCS nnnn'), 177: (2, C_ONE, LDA, 'LDA (nn),Y'), 180: (2, C_ONE, LDY, 'LDY nn,X'), 181: (2, C_ONE, LDA, 'LDA nn,X'), 182: (2, C_ONE, LDX, 'LDX nn,Y'), 184: (1, C_ONE, 184, 'CLV'), 185: (3, C_ONE, LDA, 'LDA nnnn,Y'), 186: (1, C_ONE, 186, 'TSX'), 188: (3, C_ONE, LDY, 'LDY nnnn,X'), 189: (3, C_ONE, LDA, 'LDA nnnn,X'), 190: (3, C_ONE, LDX, 'LDX nnnn,Y'), 192: (2, C_ONE, CPY, 'CPY #nn'), 193: (2, C_ONE, CMP, 'CMP (nn,X)'), 196: (2, C_ONE, CPY, 'CPY nn'), 197: (2, C_ONE, CMP, 'CMP nn'), 198: (2, C_ANY, 198, 'DEC nn'), 200: (1, C_ANY, 200, 'INY'), 201: (2, C_ONE, CMP, 'CMP #nn'), 202: (1, C_ANY, 202, 'DEX'), 204: (3, C_ONE, CPY, 'CPY nnnn'), 205: (3, C_ONE, CMP, 'CMP nnnn'), 206: (3, C_ANY, 206, 'DEC nnnn'), 208: (2, C_ONE, 208, 'BNE nnnn'), 209: (2, C_ONE, CMP, 'CMP (nn),Y'), 213: (2, C_ONE, CMP, 'CMP nn,X'), 214: (2, C_ANY, 214, 'DEC nn,X'), 216: (1, C_ONE, 216, 'CLD'), 217: (3, C_ONE, CMP, 'CMP nnnn,Y'), 221: (3, C_ONE, CMP, 'CMP nnnn,X'), 222: (3, C_ANY, 222, 'DEC nnnn,X'), 224: (2, C_ONE, CPX, 'CPX #nn'), 225: (2, C_ANY, 225, 'SBC (nn,X)'), 228: (2, C_ONE, CPX, 'CPX nn'), 229: (2, C_ANY, 229, 'SBC nn'), 230: (2, C_ANY, 230, 'INC nn'), 232: (1, C_ANY, 232, 'INX'), 233: (2, C_ONE, 233, 'SBC #nn'), 234: (1, C_ANY, 234, 'NOP'), 236: (3, C_ONE, CPX, 'CPX nnnn'), 237: (3, C_ANY, 237, 'SBC nnnn'), 238: (3, C_ANY, 238, 'INC nnnn'), 240: (2, C_ONE, 240, 'BEQ nnnn'), 241: (2, C_ANY, 241, 'SBC (nn),Y'), 245: (2, C_ANY, 245, 'SBC nn,X'), 246: (2, C_ANY, 246, 'INC nn,X'), 248: (1, C_ONE, 248, 'SED'), 249: (3, C_ANY, 249, 'SBC nnnn,Y'), 253: (3, C_ANY, 253, 'SBC nnnn,X'), 254: (3, C_ANY, 254, 'INC nnnn,X')} op_sizes = {0: 2, 3: 2, 4: 2, 7: 2, 11: 2, 12: 3, 15: 3, 17: 2, 19: 2, 20: 2, 23: 2, 27: 3, 28: 3, 31: 3, 128: 2, 130: 2, 131: 2, 135: 2, 137: 2, 139: 2, 143: 3, 147: 2, 148: 2, 151: 2, 155: 3, 156: 3, 158: 3, 159: 3} def _byte(snapshot, addr, size): op_id = 0 for i in range(addr, addr + size): op_id = 256 * op_id + snapshot[i] operation = '.BYTE ' + ','.join((str(v) for v in snapshot[addr:addr + size])) return (size, 1, op_id, operation) def _opcode(snapshot, addr, value): try: (size, max_count, op_id, operation) = OPCODES[value] except KeyError: size = OP_SIZES.get(value & 159, 1) return _byte(snapshot, addr, min(size, 65536 - addr)) if addr + size < 65537: return (size, max_count, op_id, operation) return _byte(snapshot, addr, 65536 - addr) def _decode(snapshot, start, end): addr = start while addr < end: value = snapshot[addr] (size, max_count, op_id, operation) = _opcode(snapshot, addr, value) yield (addr, size, max_count, op_id, operation) addr += size def _check_text(t_blocks, t_start, t_end, text, min_length, words): if len(text) >= min_length: if words: t_lower = text.lower() for word in words: if word in t_lower: break else: return t_blocks.append((t_start, t_end)) def _get_text_blocks(snapshot, start, end, config, data=True): if data: min_length = config.text_min_length_data else: min_length = config.text_min_length_code t_blocks = [] if end - start >= min_length: text = '' for address in range(start, end): char = chr(snapshot[address]) if char in config.text_chars: if not text: t_start = address text += char elif text: _check_text(t_blocks, t_start, address, text, min_length, config.words) text = '' if text: _check_text(t_blocks, t_start, end, text, min_length, config.words) return t_blocks def _catch_data(ctls, ctl_addr, count, max_count, addr, op_bytes): if count >= max_count > 0: if not ctls or ctls[-1][1] != 'b': ctls.append((ctl_addr, 'b')) return addr return ctl_addr def _get_operation(operations, snapshot, addr): if addr not in operations: operations[addr] = next(_decode(snapshot, addr, addr + 1))[1::3] return operations[addr] def _generate_ctls_without_code_map(snapshot, start, end, config): operations = {} ctls = [] ctl_addr = start (prev_max_count, prev_op_id, prev_op, prev_op_bytes) = (0, None, None, ()) count = 1 for (addr, size, max_count, op_id, operation) in _decode(snapshot, start, end): operations[addr] = (size, operation) op_bytes = snapshot[addr:addr + size] if op_id == END: ctl_addr = _catch_data(ctls, ctl_addr, count, prev_max_count, addr, prev_op_bytes) ctls.append((ctl_addr, 'c')) ctl_addr = addr + size (prev_max_count, prev_op_id, prev_op, prev_op_bytes) = (0, None, None, ()) count = 1 continue if op_id == prev_op_id: count += 1 elif prev_op: ctl_addr = _catch_data(ctls, ctl_addr, count, prev_max_count, addr, prev_op_bytes) count = 1 (prev_max_count, prev_op_id, prev_op, prev_op_bytes) = (max_count, op_id, operation, op_bytes) if not ctls: ctls.append((ctl_addr, 'b')) ctls.append((end, 'i')) ctls = dict(ctls) edges = sorted(ctls) while 1: done = True while len(edges) > 1: (addr, end) = (edges[0], edges[1]) if ctls[addr] == 'c': while addr < end: (size, operation) = _get_operation(operations, snapshot, addr) if operation.startswith(('BC', 'BE', 'BM', 'BN', 'BP', 'BV')) or (snapshot[addr] == 76 and len(snapshot) > addr + 2): if snapshot[addr] == 76: op_addr = snapshot[addr + 1] + 256 * snapshot[addr + 2] else: operand = snapshot[addr + 1] op_addr = addr + 2 + (operand if operand < 128 else operand - 256) if op_addr == end: del ctls[end], edges[1] done = False break addr += size if not done: break del edges[0] else: del edges[0] if done: break edges = sorted(ctls) for i in range(len(edges) - 1): (start, end) = (edges[i], edges[i + 1]) ctl = ctls[start] text_blocks = _get_text_blocks(snapshot, start, end, config, ctl == 'b') if text_blocks: ctls[start] = 'b' for (t_start, t_end) in text_blocks: ctls[t_start] = 't' if t_end < end: ctls[t_end] = 'b' if t_end < end: addr = t_end while addr < end: addr += _get_operation(operations, snapshot, addr)[0] ctls[t_end] = ctl if addr == end else 'b' return ctls def generate_ctls(snapshot, start, end, code_map, config): return _generate_ctls_without_code_map(snapshot, start, end, config)
""" Variables that never change """ __version__ = '0.0.1a' __author__ = 'Seniatical & Daftscientist' __license__ = 'MIT License' USER_AGENT = 'PyPtero <https://github.com/Rapi-Dev/PyPtero> [Version {}]'.format(__version__) USE_SSL = {True: 'https', False: 'http'} REQUEST_TYPES = ('DELETE', 'GET', 'POST', 'PATCH')
""" Variables that never change """ __version__ = '0.0.1a' __author__ = 'Seniatical & Daftscientist' __license__ = 'MIT License' user_agent = 'PyPtero <https://github.com/Rapi-Dev/PyPtero> [Version {}]'.format(__version__) use_ssl = {True: 'https', False: 'http'} request_types = ('DELETE', 'GET', 'POST', 'PATCH')
# Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' model = dict( bbox_head=dict( loss_cls=dict( _delete_=True, type='GHMC', bins=30, momentum=0.75, use_sigmoid=True, loss_weight=1.0), loss_bbox=dict( _delete_=True, type='GHMR', mu=0.02, bins=10, momentum=0.7, loss_weight=10.0))) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' model = dict(bbox_head=dict(loss_cls=dict(_delete_=True, type='GHMC', bins=30, momentum=0.75, use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(_delete_=True, type='GHMR', mu=0.02, bins=10, momentum=0.7, loss_weight=10.0))) optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
class NonogramError(Exception): """Base class for exceptions in this module.""" pass class LengthError(NonogramError): """Exception raised for unexpected list lengths.""" pass class AxisError(NonogramError): """Exception raised for unexpected axis.""" pass class ClueError(NonogramError): """Exception raised for invalid clue.""" pass class SolveError(NonogramError): """Exception raised for when solve action cannot be executed.""" pass class SetSolutionError(NonogramError): """Exception raised for an invalid change of the solution.""" pass
class Nonogramerror(Exception): """Base class for exceptions in this module.""" pass class Lengtherror(NonogramError): """Exception raised for unexpected list lengths.""" pass class Axiserror(NonogramError): """Exception raised for unexpected axis.""" pass class Clueerror(NonogramError): """Exception raised for invalid clue.""" pass class Solveerror(NonogramError): """Exception raised for when solve action cannot be executed.""" pass class Setsolutionerror(NonogramError): """Exception raised for an invalid change of the solution.""" pass
# TODO This is probably the wrong place to put it. class GameContext(object): """ This object will hold the instances required to be passed around. Avoiding the need to pass 40 arguments just in case something down the line needs it. """ def __init__(self): self.player = None self.factory_service = None self.body_factory = None self.character_factory = None self.json_template_loader = None self.item_factory = None self.echo_service = None self.current_level = None # TODO That factory service up there should contain the needs to use the factories themselves..
class Gamecontext(object): """ This object will hold the instances required to be passed around. Avoiding the need to pass 40 arguments just in case something down the line needs it. """ def __init__(self): self.player = None self.factory_service = None self.body_factory = None self.character_factory = None self.json_template_loader = None self.item_factory = None self.echo_service = None self.current_level = None
class MissingEnvError(Exception): """ An exception raised when looking up a missing environment variable without a default. """ pass
class Missingenverror(Exception): """ An exception raised when looking up a missing environment variable without a default. """ pass
""" constants.py Nicholas Boucher 2020 Contains the constant values utilized in the ElectionGuard protocol. """ # Large prime p P: int = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF93C467E37DB0C7A4D1BE3F810152CB56A1CECC3AF65CC0190C03DF34709AFFBD8E4B59FA03A9F0EED0649CCB621057D11056AE9132135A08E43B4673D74BAFEA58DEB878CC86D733DBE7BF38154B36CF8A96D1567899AAAE0C09D4C8B6B7B86FD2A1EA1DE62FF8643EC7C271827977225E6AC2F0BD61C746961542A3CE3BEA5DB54FE70E63E6D09F8FC28658E80567A47CFDE60EE741E5D85A7BD46931CED8220365594964B839896FCAABCCC9B31959C083F22AD3EE591C32FAB2C7448F2A057DB2DB49EE52E0182741E53865F004CC8E704B7C5C40BF304C4D8C4F13EDF6047C555302D2238D8CE11DF2424F1B66C2C5D238D0744DB679AF2890487031F9C0AEA1C4BB6FE9554EE528FDF1B05E5B256223B2F09215F3719F9C7CCC69DDF172D0D6234217FCC0037F18B93EF5389130B7A661E5C26E54214068BBCAFEA32A67818BD3075AD1F5C7E9CC3D1737FB28171BAF84DBB6612B7881C1A48E439CD03A92BF52225A2B38E6542E9F722BCE15A381B5753EA842763381CCAE83512B30511B32E5E8D80362149AD030AABA5F3A5798BB22AA7EC1B6D0F17903F4E234EA6034AA85973F79A93FFB82A75C47C03D43D2F9CA02D03199BACEDDD45334DBC6B5FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF # Small prime q Q: int = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF43 # Cofactor r R: int = 0x100000000000000000000000000000000000000000000000000000000000000BC93C467E37DB0C7A4D1BE3F810152CB56A1CECC3AF65CC0190C03DF34709B8AF6A64C0CEDCF2D559DA9D97F095C3076C686037619148D2C86C317102AFA2148031F04440AC0FF0C9A417A89212512E7607B2501DAA4D38A2C1410C4836149E2BDB8C8260E627C4646963EFFE9E16E495D48BD215C6D8EC9D1667657A2A1C8506F2113FFAD19A6B2BC7C45760456719183309F874BC9ACE570FFDA877AA2B23A2D6F291C1554CA2EB12F12CD009B8B8734A64AD51EB893BD891750B85162241D908F0C9709879758E7E8233EAB3BF2D6AB53AFA32AA153AD6682E5A0648897C9BE18A0D50BECE030C3432336AD9163E33F8E7DAF498F14BB2852AFFA814841EB18DD5F0E89516D557776285C16071D211194EE1C3F34642036AB886E3EC28882CE4003DEA335B4D935BAE4B58235B9FB2BAB713C8F705A1C7DE42220209D6BBCACC467318601565272E4A63E38E2499754AE493AC1A8E83469EEF35CA27C271BC792EEE21156E617B922EA8F713C22CF282DC5D6385BB12868EB781278FA0AB2A8958FCCB5FFE2E5C361FC174420122B0163CA4A46308C8C46C91EA7457C1AD0D69FD4A7F529FD4A7F529FD4A7F529FD4A7F529FD4A7F529FD4A7F529FD4A7F52A # Generator g G: int = 0x037DE384F98F6E038D2A3141825B33D5D45EC4CC64CFD15E750D6798F5196CF2A142CDF33F6EF853840EC7D4EC804794CFB0CFB65363B2566387B98EE0E3DEF1B706FA55D5038FFB4A62DCBB93B1DDD8D3B308DA86D1C3A525EF356FE5BB59314E65633480B396E1DD4B795F78DE07D86B0E2A05BE6AF78FD7F736FCBA6C032E26E050AF50A03C65FA7B6C87F4554CB57F3DABCBAD8EB9D8FDEBEEF58570669ACC3EDA17DBFC47B8B3C39AA08B829B28872E62B5D1B13A98F09D40AC20C2AB74A6750E7C8750B5141E221C41F55BBA31D8E41422B64D2CBA7AAA0E9FD8785702F6932825BF45DE8386D24900742062C1322B37C50AF182158090C35DA9355E6CF7F72DA39A2284FDFB1918B2A2A30E69501FA2342B728263DF23F1DB8355BDE1EB276FB3685F371672CEB313FDAB069CC9B11AB6C59BCE62BAAD96AAC96B0DBE0C7E71FCB22552545A5D1CEDEEE01E4BC0CDBDB76B6AD45F09AF5E71114A005F93AD97B8FE09274E76C94B2008926B38CAEC94C95E96D628F6BC80662BA06207801328B2C6A60526BF7CD02D9661385AC3B1CBDB50F759D0E9F61C11A07BF4218F299BCB2900520076EBD2D95A3DEE96D4809EF34ABEB83FDBA8A12C5CA82757288A89C931CF564F00E8A317AE1E1D828E61369BA0DDBADB10C136F8691101AD82DC54775AB8353840D9992197D80A6E94B38AC417CDDF40B0C73ABF03E8E0AA
""" constants.py Nicholas Boucher 2020 Contains the constant values utilized in the ElectionGuard protocol. """ p: int = 1044388881413152506691752710716624382579964249047383780384233483283953907971553643537729993126875883902173634017777416360502926082946377942955704498542097614841825246773580689398386320439747911160897731551074903967243883427132918813748016269754522343505285898816777211761912392772914485521155521641049273446207578961939840619466145806859275053476560973295158703823395710210329314709715239251736552384080845836048778667318931418338422443891025911884723433084701207771901944593286624979917391350564662632723703007964229849154756196890615252286533089643184902706926081744149289517418249153634178342075381874131646013444796894582106870531535803666254579602632453103741452569793905551901541856173251385047414840392753585581909950158046256810542678368121278509960520957624737942914600310646609792665012858397381435755902851312071248102599442308951327039250818892493767423329663783709190716162023529669217300939783171415808233146823000766917789286154006042281423733706462905243774854543127239500245873582012663666430583862778167369547603016344242729592244544608279405999759391099775667746401633668308698186721172238255007962658564443858927634850415775348839052026675785694826386930175303143450046575460843879941791946313299322976993405829119 q: int = 115792089237316195423570985008687907853269984665640564039457584007913129639747 r: int = 9019518416950528558373478086511232658951474842525520401496114928154304263969655687927867442562559311457926593510757267649063628681241064260953609180947464800958467390949485096429653122916928704841547265126247408167856620024815508684472819746384115369148322548696439327979752948311712506113890045287907335656308945630141969472484100558565879585476547782717283106837945923693806973017510492730838409381014701258202694245760602718602550739205297257940969992371799325870179746191672464736721424617639973324090288952006260483222894269928179970153634220390287255837625331668555933039199194619824375869291271098935000699785346405055160394688637074599519052655517388596327473273906029869030988064607361165803129718773877185415445291671089029845994683414682274353665003204293107284473196033588697845087556526514092678744031772226855409523354476737660407619436531080189837076164818131039104397776628128325247709678431023369197272126578394856752060591013812807437681624251867074769638052097737959472027002770963255207757153746376691827309573603635608169799503216990026029763868313819311401747718758606328306442737694783044330450178447543246397189503997649637375210794 g: int = 14245109091294741386751154342323521003543059865261911603340669522218159898070093327838595045175067897363301047764229640327930333001123401070596314469603183633790452807428416775717923182949583875381833912370889874572112086966300498607364501764494811956017881198827400327403252039184448888877644781610594801053753235453382508543906993571248387749420874609737451803650021788641249940534081464232937193671929586747339353451021712752406225276255010281004857233043241332527821911604413582442915993833774890228705495787357234006932755876972632840760599399514028393542345035433135159511099877773857622699742816228063106927776147867040336649025152771036361273329385354927395836330206311072577683892664475070720408447257635606891920123791602538518516524873664205034698194561673019535564273204744076336022130453963648114321050173994259620611015189498335966173440411967562175734606706258335095991140827763942280037063180207172918769921712003400007923888084296685269233298371143630883011213745082207405479978418089917768242592557172834921185990876960527013386693909961093302289646193295725135238595082039133488721800071459503353417574248679728577942863659802016004283193163470835709405666994892499382890912238098413819320185166580019604608311466
''' Merge sort ''' def merge ( lst1 , lst2 ): ''' This function merges 2 lists. :param lst1: :param lst2: :return list 1 merged with list 2: >>> merge ([1, 2, 4, 6] ,[3, 5, 7, 8]) [1, 2, 3, 4, 5, 6, 7, 8] ''' res = [] n1 , n2 = len( lst1 ) , len( lst2 ) i , j = 0 , 0 while i < n1 and j < n2 : if lst1 [ i ] <= lst2 [ j ]: res += [ lst1 [ i ]] i += 1 else : res += [ lst2 [ j ]] j += 1 return res + lst1 [ i :] + lst2 [ j :] def merge_sort ( lst ): ''' :param lst: :return sorted list: Input : list of elements Output : Sorted list of elements >>> merge_sort ([3, 7, 9, 6, 2, 5, 4, 1, 8]) [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> merge_sort ([11, 0, 1, 5, 7, 2]) [0, 1, 2, 5, 7, 11] >>> merge_sort ([10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ''' k , n = 1 , len ( lst ) while k < n : nxt = [] for a in range (0 , n , 2* k ): b , c = a + k , a + 2* k nxt += merge ( lst [ a : b ] , lst [ b : c ]) lst = nxt k = 2* k return lst
""" Merge sort """ def merge(lst1, lst2): """ This function merges 2 lists. :param lst1: :param lst2: :return list 1 merged with list 2: >>> merge ([1, 2, 4, 6] ,[3, 5, 7, 8]) [1, 2, 3, 4, 5, 6, 7, 8] """ res = [] (n1, n2) = (len(lst1), len(lst2)) (i, j) = (0, 0) while i < n1 and j < n2: if lst1[i] <= lst2[j]: res += [lst1[i]] i += 1 else: res += [lst2[j]] j += 1 return res + lst1[i:] + lst2[j:] def merge_sort(lst): """ :param lst: :return sorted list: Input : list of elements Output : Sorted list of elements >>> merge_sort ([3, 7, 9, 6, 2, 5, 4, 1, 8]) [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> merge_sort ([11, 0, 1, 5, 7, 2]) [0, 1, 2, 5, 7, 11] >>> merge_sort ([10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] """ (k, n) = (1, len(lst)) while k < n: nxt = [] for a in range(0, n, 2 * k): (b, c) = (a + k, a + 2 * k) nxt += merge(lst[a:b], lst[b:c]) lst = nxt k = 2 * k return lst
""" * * Author: Juarez Paulino(coderemite) * Email: juarez.paulino@gmail.com * """ for _ in [0]*int(input()): a,b,c,d,k=map(int,input().split()) x,y=(a+c-1)//c,(b+d-1)//d print(-1) if x+y>k else print(x,y)
""" * * Author: Juarez Paulino(coderemite) * Email: juarez.paulino@gmail.com * """ for _ in [0] * int(input()): (a, b, c, d, k) = map(int, input().split()) (x, y) = ((a + c - 1) // c, (b + d - 1) // d) print(-1) if x + y > k else print(x, y)
# Runtime: 32 ms # Beats 99.87% of Python submissions # Definition for singly-linked list. # class ListNode: # def __init__(self, x): # self.val = x # self.next = None class Solution: def middleNode(self, head): """ :type head: ListNode :rtype: ListNode """ # Could make one pass to find number of elements, # then traverse to the middle of that and return the # element there, but that's stupid. slow_ptr = head fast_ptr = head while fast_ptr and fast_ptr.next is not None: fast_ptr = fast_ptr.next.next slow_ptr = slow_ptr.next if not fast_ptr or not fast_ptr.next: return slow_ptr return slow_ptr.next
class Solution: def middle_node(self, head): """ :type head: ListNode :rtype: ListNode """ slow_ptr = head fast_ptr = head while fast_ptr and fast_ptr.next is not None: fast_ptr = fast_ptr.next.next slow_ptr = slow_ptr.next if not fast_ptr or not fast_ptr.next: return slow_ptr return slow_ptr.next
# @Rexhino_Kovaci # hash tables we use dictionaries as it is an array whose indexes are obtained using a hash function on the keys # we use 3 collision handling problems: linear, quadratic, double hashing # we are obliged to use ASCII values and divide it by the element of our array/dictionary # declare a dictionary dict = {'Name': 'Rexhino', 'Age': 19, 'Class': 'CE/IT'} # Accessing the dictionary with its key print( "dict['Name']: ", dict['Name']) print ("dict['Age']: ", dict['Age']) # modify the dictionary dict = {'Name': '', 'Age': 99, 'Class': 'None'} dict['Age'] = 98 # update existing entry dict['School'] = "Canadian Institute of Technology" # add new entry print ("dict['Age']: ", dict['Age']) print ("dict['School']: ", dict['School'])
dict = {'Name': 'Rexhino', 'Age': 19, 'Class': 'CE/IT'} print("dict['Name']: ", dict['Name']) print("dict['Age']: ", dict['Age']) dict = {'Name': '', 'Age': 99, 'Class': 'None'} dict['Age'] = 98 dict['School'] = 'Canadian Institute of Technology' print("dict['Age']: ", dict['Age']) print("dict['School']: ", dict['School'])
heroes = {hero: [] for hero in input().split(", ")} command = input() while command != "End": hero, item, price = command.split("-") if item not in heroes[hero]: heroes[hero] += [item, price] command = input() for hero, items in heroes.items(): price = [int(item) for item in items if item.isdecimal()] print(f"{hero} -> Items: {int(len(items) / 2)}, Cost: {sum(price)}")
heroes = {hero: [] for hero in input().split(', ')} command = input() while command != 'End': (hero, item, price) = command.split('-') if item not in heroes[hero]: heroes[hero] += [item, price] command = input() for (hero, items) in heroes.items(): price = [int(item) for item in items if item.isdecimal()] print(f'{hero} -> Items: {int(len(items) / 2)}, Cost: {sum(price)}')
""" Given an input string (s) and a pattern (p), implement regular expression matching with support for '.' and '*'. '.' Matches any single character. '*' Matches zero or more of the preceding element. Example 1: Input: s = "aa" p = "a" Output: false Explanation: "a" does not match the entire string "aa". Example 2: Input: s = "aa" p = "a*" Output: true Explanation: '*' means zero or more of the precedeng element, 'a'. Therefore, by repeating 'a' once, it becomes "aa". create by swm 2018/05/31 """ class Solution(object): def isMatch(self, text, pattern): memo = {} def dp(i, j): if (i, j) not in memo: if j == len(pattern): ans = i == len(text) else: first_match = i < len(text) and pattern[j] in {text[i], '.'} if j+1 < len(pattern) and pattern[j+1] == '*': ans = dp(i, j+2) or first_match and dp(i+1, j) else: ans = first_match and dp(i+1, j+1) memo[i, j] = ans return memo[i, j] return dp(0, 0) if __name__ == '__main__': sl = Solution() s = "mississippi" p = "mis*is*p*." print(sl.isMatch(s, p))
""" Given an input string (s) and a pattern (p), implement regular expression matching with support for '.' and '*'. '.' Matches any single character. '*' Matches zero or more of the preceding element. Example 1: Input: s = "aa" p = "a" Output: false Explanation: "a" does not match the entire string "aa". Example 2: Input: s = "aa" p = "a*" Output: true Explanation: '*' means zero or more of the precedeng element, 'a'. Therefore, by repeating 'a' once, it becomes "aa". create by swm 2018/05/31 """ class Solution(object): def is_match(self, text, pattern): memo = {} def dp(i, j): if (i, j) not in memo: if j == len(pattern): ans = i == len(text) else: first_match = i < len(text) and pattern[j] in {text[i], '.'} if j + 1 < len(pattern) and pattern[j + 1] == '*': ans = dp(i, j + 2) or (first_match and dp(i + 1, j)) else: ans = first_match and dp(i + 1, j + 1) memo[i, j] = ans return memo[i, j] return dp(0, 0) if __name__ == '__main__': sl = solution() s = 'mississippi' p = 'mis*is*p*.' print(sl.isMatch(s, p))
def fib(n): if n <= 1: return 1 return fib(n-1) + fib(n-2) def fib2(n,cache={}): if n <= 1: return 1 if n not in cache: cache[n] = fib2(n-1,cache) + fib2(n-2,cache) return cache[n] sum=0 for i in range(1,100): if fib2(i) >= 4000000: break if fib2(i) % 2 == 0: sum+= fib2(i) print(sum)
def fib(n): if n <= 1: return 1 return fib(n - 1) + fib(n - 2) def fib2(n, cache={}): if n <= 1: return 1 if n not in cache: cache[n] = fib2(n - 1, cache) + fib2(n - 2, cache) return cache[n] sum = 0 for i in range(1, 100): if fib2(i) >= 4000000: break if fib2(i) % 2 == 0: sum += fib2(i) print(sum)
pw = ph = 500 s = 5 amount = int(pw / s + 4) newPage(pw, ph) translate(pw/2, ph/2) rect(-s, 0, s, s) for i in range(amount): rect(0, 0, s*i, s) rotate(-90) translate(0, s * (i-1)) # saveImage('spiral.jpg')
pw = ph = 500 s = 5 amount = int(pw / s + 4) new_page(pw, ph) translate(pw / 2, ph / 2) rect(-s, 0, s, s) for i in range(amount): rect(0, 0, s * i, s) rotate(-90) translate(0, s * (i - 1))
class Square: def __init__(self, id, x, y, data): self.id = id self.x = x self.y = y self.data = data
class Square: def __init__(self, id, x, y, data): self.id = id self.x = x self.y = y self.data = data
class PaireGraphs: def __init__(self,graphe1, graphe2, matching): self.premierGraphe = graphe1 self.secondGraphe = graphe2 self.matching = matching
class Pairegraphs: def __init__(self, graphe1, graphe2, matching): self.premierGraphe = graphe1 self.secondGraphe = graphe2 self.matching = matching
# Variable declaration myName = 'Dany Sluijk'; myAddress = 'Nijenoord 9'; result = 'Ik ben ' + myName + ' en mijn adres is: ' + myAddress; print(result);
my_name = 'Dany Sluijk' my_address = 'Nijenoord 9' result = 'Ik ben ' + myName + ' en mijn adres is: ' + myAddress print(result)
""" Demonstrates list functions. """ values = [6, 3, 1, 2] #A list named values containing 6, 4, 3, 1, 5 and 2 values.insert(1, 4) #Inserts 4 at index 1 print("values:", values) #Prints the values list (in list form) values.insert(4, 5) #Inserts 5 at index 4 print("values:", values) #Prints the values list (in list form) print() values.reverse() #Reverses the values list print("values (reversed):", values) #Prints the values list (in list form) print() values.sort() #Sorts the values list print("values (sorted):", values) #Prints the values list (in list form) print() values.reverse() #Reverses the values list again print("values (reversed):", values) #Prints the values list (in list form)
""" Demonstrates list functions. """ values = [6, 3, 1, 2] values.insert(1, 4) print('values:', values) values.insert(4, 5) print('values:', values) print() values.reverse() print('values (reversed):', values) print() values.sort() print('values (sorted):', values) print() values.reverse() print('values (reversed):', values)
print("Hello World!!") print() print('-----------------------') print() print(note := 'lets create a string') print(some_string := 'this is a string') try: # index starts from 0 thus len -1 if some_string[len(some_string) - 1] == 'g': print(note := 'this shall be executed') except IndexError: print(note := 'if we have the Index error this shall be executed') else: print('No exception was raised this time, we will execute the else block') finally: print(note := 'finally is always be executed') print() print('-----------------------') print() try: # this will case the index error as we overshoot the length of string if some_string[len(some_string)] == 'g': print(note := 'this shall not be executed') except IndexError: print(note := 'We will get the Index error') else: print('exception was raised this time, we shall not execute the else block') finally: print(note := 'finally is always be executed') print() print('-----------------------') print()
print('Hello World!!') print() print('-----------------------') print() print((note := 'lets create a string')) print((some_string := 'this is a string')) try: if some_string[len(some_string) - 1] == 'g': print((note := 'this shall be executed')) except IndexError: print((note := 'if we have the Index error this shall be executed')) else: print('No exception was raised this time, we will execute the else block') finally: print((note := 'finally is always be executed')) print() print('-----------------------') print() try: if some_string[len(some_string)] == 'g': print((note := 'this shall not be executed')) except IndexError: print((note := 'We will get the Index error')) else: print('exception was raised this time, we shall not execute the else block') finally: print((note := 'finally is always be executed')) print() print('-----------------------') print()
class Solution: def eliminateMaximum(self, dist: List[int], speed: List[int]) -> int: for i, arrivalTime in enumerate(sorted([(d - 1) // s for d, s in zip(dist, speed)])): if i > arrivalTime: return i return len(dist)
class Solution: def eliminate_maximum(self, dist: List[int], speed: List[int]) -> int: for (i, arrival_time) in enumerate(sorted([(d - 1) // s for (d, s) in zip(dist, speed)])): if i > arrivalTime: return i return len(dist)
"""Lambdata - a collection of Data Science helper functions""" # # accessing libraries through pipenv # import pandas as pd # import numpy as np COLORS = ["cyan", "teal", "mauve", "blue", "crimson"] FAVORITE_NMBERS = [2.71, 101, 55, 12, 3.14] def increment(x): return x + 1 # # Implement your helper functions # def df_cleaner(df): # """Cleans a DF""" # # implement df_cleaner # pass # def df_destoryer(df): # """Destroys a DF""" # pass
"""Lambdata - a collection of Data Science helper functions""" colors = ['cyan', 'teal', 'mauve', 'blue', 'crimson'] favorite_nmbers = [2.71, 101, 55, 12, 3.14] def increment(x): return x + 1
# GENERATED VERSION FILE # TIME: Tue Dec 28 10:55:50 2021 __version__ = '1.0.0+8da4630' short_version = '1.0.0'
__version__ = '1.0.0+8da4630' short_version = '1.0.0'
def addNums(x, y): """ Add two numbers together """ return x + y def subtractNums(x, y): """ subtract two numbers and return result """ return y - x
def add_nums(x, y): """ Add two numbers together """ return x + y def subtract_nums(x, y): """ subtract two numbers and return result """ return y - x
def arr2bin(arr): total = 0 for a in arr: if not type(a) == int: return False total += a return '{:b}'.format(total)
def arr2bin(arr): total = 0 for a in arr: if not type(a) == int: return False total += a return '{:b}'.format(total)
num1 = 111 num2 = 222 num3 = 3333333 num4 = 4444
num1 = 111 num2 = 222 num3 = 3333333 num4 = 4444
N, M = map(int, input().split()) if M == 1 or M == 2: print('NEWBIE!') elif 2 < M <= N: print("OLDBIE!") else: print("TLE!")
(n, m) = map(int, input().split()) if M == 1 or M == 2: print('NEWBIE!') elif 2 < M <= N: print('OLDBIE!') else: print('TLE!')
# # Print elements of a tuple or a default message # Used a lot in CodinGame Clash of Code # # Tuple of elements a = tuple(range(5)) # Unpack the elements of a # >>> print(*a) # 0 1 2 3 4 # If a is empty, *a = False # Therefore, unpack the elements inside ["None"], therefore "None" print(*a or ["None"]) # # Transpose a list of list # Used a lot in CodinGame Clash of Code # # List of lists a = [[*range(5)] for _ in range(5)] # Unpack then zip # Unpack : Returns every list in a # >>> print(*a) # [0, 1, 2, 3, 4] [0, 1, 2, 3, 4] [0, 1, 2, 3, 4] [0, 1, 2, 3, 4] [0, 1, 2, 3, 4] # Zip : Get an item from each list of a and puts it inside a tuple of elements # >>> for elem in zip(d.keys(), d.values(), d.items(), d.keys()): # ... print(type(elem), elem) # <class 'tuple'> ('0', 0, ('0', 0), '0') # <class 'tuple'> ('1', 1, ('1', 1), '1') # <class 'tuple'> ('2', 4, ('2', 4), '2') # <class 'tuple'> ('3', 9, ('3', 9), '3') a_t = list(zip(*a)) print(a_t) # # Convert a boolean to another variable # Used a lot in CodinGame Clash of Code # # Boolean b = True # Convert # False : 0, True : 1 res = ("bar", "foo") print(res[b])
a = tuple(range(5)) print(*(a or ['None'])) a = [[*range(5)] for _ in range(5)] a_t = list(zip(*a)) print(a_t) b = True res = ('bar', 'foo') print(res[b])
class HumanHandover: """ TODO: Not yet implemented """ def __init__(self, operators=None): self.operators = operators if operators else [] def register_operator(self): raise NotImplementedError def remove_operator(self, telegram_chat_id): raise NotImplementedError
class Humanhandover: """ TODO: Not yet implemented """ def __init__(self, operators=None): self.operators = operators if operators else [] def register_operator(self): raise NotImplementedError def remove_operator(self, telegram_chat_id): raise NotImplementedError
list = [[],[]] for v in range(1,8): numero = int(input(f"Digite o {v}o. valor: " )) if numero%2 == 0: list[0].append(numero) else: list[1].append(numero) print(f'Os valores pares digitados foram: {sorted(list[0])}') print(f'Os valores impares digitados foram: {sorted(list[1])}')
list = [[], []] for v in range(1, 8): numero = int(input(f'Digite o {v}o. valor: ')) if numero % 2 == 0: list[0].append(numero) else: list[1].append(numero) print(f'Os valores pares digitados foram: {sorted(list[0])}') print(f'Os valores impares digitados foram: {sorted(list[1])}')
def test_docker_running(host): docker = host.service("docker") assert docker.is_enabled assert docker.is_running def test_swarm_is_active(docker_info): assert "Swarm: active" in docker_info
def test_docker_running(host): docker = host.service('docker') assert docker.is_enabled assert docker.is_running def test_swarm_is_active(docker_info): assert 'Swarm: active' in docker_info
# weird string case from codewars 6 kyu # def to_weird_case(string): #TODO new_str = '' word_index = 0 current_index = 0 while current_index != len(string): print(f'word_index: {word_index}, stringval: {string[current_index]} ') if string[current_index] == ' ': new_str += ' ' current_index += 1 word_index = 0 continue elif word_index == 0: new_str += string[current_index].upper() elif word_index == 1: new_str += string[current_index].lower() elif word_index % 2 == 0: new_str += string[current_index].upper() elif word_index %2 == 1: new_str += string[current_index].lower() current_index += 1 word_index += 1 return new_str #edge cases empty string: return '', #0== e 1==odd # #'01234 012345 0123 word index #'Weird String case' #'0123456789... #if word index is 0, 1, even upper, odd: lower ################################################### #set word index, current_index (while is not leng(str)), #while current_index != len(string): #if string[current_index] == ' ': #new_str += ' ' #current_index += 1 #word_index == 0 #continue #elif word_index == 0: #new_str += string[current_index].upper(), #elif word_index == 1: #new_str += string[current_index].lower() #elif word_index % 2 == 0: #new_str += string[current_index].upper() #elif word_index %2 == 1: #new_str += string[current_index].lower() #current_index += 1 #word_index += 1 #123456789abcde c_i #this is a test #0123 01 0 0123 w_i #
def to_weird_case(string): new_str = '' word_index = 0 current_index = 0 while current_index != len(string): print(f'word_index: {word_index}, stringval: {string[current_index]} ') if string[current_index] == ' ': new_str += ' ' current_index += 1 word_index = 0 continue elif word_index == 0: new_str += string[current_index].upper() elif word_index == 1: new_str += string[current_index].lower() elif word_index % 2 == 0: new_str += string[current_index].upper() elif word_index % 2 == 1: new_str += string[current_index].lower() current_index += 1 word_index += 1 return new_str
class QuizBrain: def __init__(self,question_list): self.question_list = question_list self.question_number = 0 self.user_score = 0 def next_question(self): current_question = self.question_list[self.question_number] self.question_number += 1 user_answer = input(f"Q.{self.question_number}: {current_question.text}: (True/False) >>>") self.check_answer(user_answer,current_question.answer) def still_has_question(self): return self.question_number < len(self.question_list) def check_answer(self, user_answer, correct_answer): if user_answer.lower() == correct_answer.lower(): print("You got it. Your answer is correct.") self.user_score += 1 else: print("Your answer is wrong!") print(f"Correct answer is {correct_answer}") print(f"Your current score is {self.user_score}/{self.question_number}") print("\n") if self.question_number == len(self.question_list): print("You have completed the Quiz") print(f"Your final score is {self.user_score}/{self.question_number}")
class Quizbrain: def __init__(self, question_list): self.question_list = question_list self.question_number = 0 self.user_score = 0 def next_question(self): current_question = self.question_list[self.question_number] self.question_number += 1 user_answer = input(f'Q.{self.question_number}: {current_question.text}: (True/False) >>>') self.check_answer(user_answer, current_question.answer) def still_has_question(self): return self.question_number < len(self.question_list) def check_answer(self, user_answer, correct_answer): if user_answer.lower() == correct_answer.lower(): print('You got it. Your answer is correct.') self.user_score += 1 else: print('Your answer is wrong!') print(f'Correct answer is {correct_answer}') print(f'Your current score is {self.user_score}/{self.question_number}') print('\n') if self.question_number == len(self.question_list): print('You have completed the Quiz') print(f'Your final score is {self.user_score}/{self.question_number}')
swagger_file_content = ''' swagger: '2.0' info: description: Estuary agent will run your shell commands via REST API version: 4.4.0 title: estuary-agent contact: name: Catalin Dinuta url: 'https://github.com/dinuta' email: constantin.dinuta@gmail.com license: name: Apache 2.0 url: 'http://www.apache.org/licenses/LICENSE-2.0.html' host: 'localhost:8080' basePath: / tags: - name: estuary-agent description: root paths: /about: get: tags: - estuary-agent summary: Information about the application operationId: aboutGet produces: - application/json parameters: - name: Token in: header description: Token required: false type: string responses: '200': description: Prints the name and version of the application. schema: $ref: '#/definitions/ApiResponse' '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found /command: post: tags: - estuary-agent summary: Starts multiple commands in blocking mode sequentially. Set the client timeout at needed value. operationId: commandPost_1 consumes: - application/json - application/x-www-form-urlencoded - text/plain produces: - application/json parameters: - in: body name: commands description: Commands to run. E.g. ls -lrt required: true schema: type: string - name: Token in: header description: Token required: false type: string responses: '200': description: Commands start success schema: $ref: '#/definitions/ApiResponse' '201': description: Created '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found '500': description: Commands start failure schema: $ref: '#/definitions/ApiResponse' /commanddetached: get: tags: - estuary-agent summary: Gets information about the last command started in detached mode operationId: commandDetachedGet produces: - application/json parameters: - name: Token in: header description: Token required: false type: string responses: '200': description: Get command detached info success schema: $ref: '#/definitions/ApiResponse' '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found '500': description: Get command detached info failure schema: $ref: '#/definitions/ApiResponse' delete: tags: - estuary-agent summary: Stops all commands that were previously started in detached mode operationId: commandDetachedDelete produces: - application/json parameters: - name: Token in: header description: Token required: false type: string responses: '200': description: command detached stop success schema: $ref: '#/definitions/ApiResponse' '204': description: No Content '401': description: Unauthorized '403': description: Forbidden '500': description: command detached stop failure schema: $ref: '#/definitions/ApiResponse' '/commanddetached/{id}': get: tags: - estuary-agent summary: Gets information about the command identified by id started in detached mode operationId: commandDetachedIdGet produces: - application/json parameters: - name: id in: path description: Command detached id set by the user required: true type: string - name: Token in: header description: Token required: false type: string responses: '200': description: Get command detached info success schema: $ref: '#/definitions/ApiResponse' '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found '500': description: Get command detached info failure schema: $ref: '#/definitions/ApiResponse' post: tags: - estuary-agent summary: Starts the shell commands in detached mode and sequentially operationId: commandDetachedIdPost consumes: - application/json - application/x-www-form-urlencoded - text/plain produces: - application/json parameters: - in: body name: commandContent description: List of commands to run one after the other. E.g. make/mvn/sh/npm required: true schema: type: string - name: id in: path description: Command detached id set by the user required: true type: string - name: Token in: header description: Token required: false type: string responses: '200': description: Commands start success schema: $ref: '#/definitions/ApiResponse' '201': description: Created '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found '500': description: Commands start failure schema: $ref: '#/definitions/ApiResponse' delete: tags: - estuary-agent summary: Deletes the associated processes of the shell commands in detached mode operationId: commandDetachedIdDelete produces: - application/json parameters: - name: id in: path description: Command detached id set by the user required: true type: string - name: Token in: header description: Token required: false type: string responses: '200': description: Command delete success schema: $ref: '#/definitions/ApiResponse' '204': description: No Content '401': description: Unauthorized '403': description: Forbidden '500': description: Command delete failure schema: $ref: '#/definitions/ApiResponse' '/commanddetachedyaml/{id}': post: tags: - estuary-agent summary: Starts the commands in detached mode and sequentially. The commands are described by yaml. operationId: commandDetachedIdPostYaml consumes: - application/json - application/x-www-form-urlencoded - text/plain produces: - application/json parameters: - in: body name: commandContent description: List of commands to run one after the other in yaml format. required: true schema: type: string - name: id in: path description: Command detached id set by the user required: true type: string - name: Token in: header description: Token required: false type: string responses: '200': description: Commands start success schema: $ref: '#/definitions/ApiResponse' '201': description: Created '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found '500': description: Commands start failure schema: $ref: '#/definitions/ApiResponse' /commandparallel: post: tags: - estuary-agent summary: Starts multiple commands in blocking mode parallel. Set the client timeout at needed value. operationId: commandPost_2 consumes: - application/json - application/x-www-form-urlencoded - text/plain produces: - application/json parameters: - in: body name: commands description: Commands to run. E.g. ls -lrt required: true schema: type: string - name: Token in: header description: Token required: false type: string responses: '200': description: commands start success schema: $ref: '#/definitions/ApiResponse' '201': description: Created '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found '500': description: commands start failure schema: $ref: '#/definitions/ApiResponse' /commandyaml: post: tags: - estuary-agent summary: Starts multiple commands in blocking mode sequentially. The commands are described in yaml format. Set the client timeout at needed value. operationId: commandPost consumes: - application/json - application/x-www-form-urlencoded - text/plain produces: - application/json parameters: - in: body name: commands description: Commands to run in yaml format required: true schema: type: string - name: Token in: header description: Token required: false type: string responses: '200': description: Commands start success schema: $ref: '#/definitions/ApiResponse' '201': description: Created '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found '500': description: Commands start failure schema: $ref: '#/definitions/ApiResponse' /env: get: tags: - estuary-agent summary: Print all environment variables operationId: envGet produces: - application/json parameters: - name: Token in: header description: Token required: false type: string responses: '200': description: List of the entire environment variables schema: $ref: '#/definitions/ApiResponse' '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found post: tags: - estuary-agent summary: Set environment variables operationId: envPost consumes: - application/json produces: - application/json parameters: - in: body name: envVars description: List of env vars by key-value pair in JSON format required: true schema: type: string - name: Token in: header description: Authentication Token required: false type: string responses: '200': description: Set environment variables success schema: $ref: '#/definitions/ApiResponse' '201': description: Created '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found '500': description: Set environment variables failure schema: $ref: '#/definitions/ApiResponse' delete: tags: - estuary-agent summary: Deletes the custom defined env vars contained in the virtual environment operationId: envDelete produces: - application/json parameters: - name: Token in: header description: Token required: false type: string responses: '200': description: 'Deletes the entire virtual env vars, but keeping system env vars.' schema: $ref: '#/definitions/ApiResponse' '204': description: No Content '401': description: Unauthorized '403': description: Forbidden '/env/{env_name}': get: tags: - estuary-agent summary: Gets the environment variable value from the environment operationId: envEnvNameGet produces: - application/json parameters: - name: env_name in: path description: The name of the env var to get value from required: true type: string - name: Token in: header description: Token required: false type: string responses: '200': description: Get env var success schema: $ref: '#/definitions/ApiResponse' '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found '500': description: Get env var failure schema: $ref: '#/definitions/ApiResponse' /file: get: tags: - estuary-agent summary: Gets the content of the file operationId: fileGet consumes: - application/octet-stream - text/plain produces: - application/json - application/zip parameters: - name: File-Path in: header description: Target file path to get required: false type: string - name: Token in: header description: Token required: false type: string responses: '200': description: 'The content of the file in plain text, success' schema: $ref: '#/definitions/ApiResponse' '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found '500': description: 'Failure, the file content could not be read' schema: $ref: '#/definitions/ApiResponse' post: tags: - estuary-agent summary: Uploads a file no mater the format. Binary or raw operationId: filePost consumes: - application/octet-stream - application/json - application/x-www-form-urlencoded - text/plain produces: - application/json - text/plain parameters: - in: body name: content description: The content of the file required: false schema: type: string format: byte - name: File-Path in: header description: File-Path required: true type: string - name: Token in: header description: Token required: false type: string responses: '200': description: The content of the file was uploaded successfully schema: $ref: '#/definitions/ApiResponse' '201': description: Created '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found '500': description: 'Failure, the file content could not be uploaded' schema: $ref: '#/definitions/ApiResponse' put: tags: - estuary-agent summary: Uploads a file no mater the format. Binary or raw operationId: filePut consumes: - application/octet-stream - application/json - application/x-www-form-urlencoded - text/plain produces: - application/json - text/plain parameters: - in: body name: content description: The content of the file required: false schema: type: string format: byte - name: File-Path in: header description: File-Path required: true type: string - name: Token in: header description: Token required: false type: string responses: '200': description: The content of the file was uploaded successfully schema: $ref: '#/definitions/ApiResponse' '201': description: Created '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found '500': description: 'Failure, the file content could not be uploaded' schema: $ref: '#/definitions/ApiResponse' /folder: get: tags: - estuary-agent summary: Gets the folder as zip archive. Useful to get test results folder operationId: folderGet produces: - application/json - application/zip parameters: - name: Folder-Path in: header description: Target folder path to get as zip required: false type: string - name: Token in: header description: Token required: false type: string responses: '200': description: The content of the folder as zip archive schema: $ref: '#/definitions/ApiResponse' '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found '500': description: The content of the folder could not be obtained schema: $ref: '#/definitions/ApiResponse' /ping: get: tags: - estuary-agent summary: Ping endpoint which replies with pong operationId: pingGet produces: - application/json parameters: - name: Token in: header description: Token required: false type: string responses: '200': description: Ping endpoint which replies with pong. Useful when checking the alive status of the service schema: $ref: '#/definitions/ApiResponse' '401': description: Unauthorized '403': description: Forbidden '404': description: Not Found definitions: ApiResponse: type: object properties: code: type: integer format: int32 description: type: object message: type: string name: type: string path: type: string timestamp: type: string version: type: string title: ApiResponse ApiResponseCommandDescription: type: object properties: code: type: integer format: int32 description: $ref: '#/definitions/CommandDescription' message: type: string name: type: string path: type: string timestamp: type: string version: type: string title: ApiResponseCommandDescription CommandDescription: type: object properties: commands: type: object additionalProperties: $ref: '#/definitions/CommandStatus' duration: type: number format: float finished: type: boolean finishedat: type: string id: type: string pid: type: integer format: int64 processes: type: array items: $ref: '#/definitions/ProcessInfo' started: type: boolean startedat: type: string title: CommandDescription CommandDetails: type: object properties: args: type: array items: type: string code: type: integer format: int64 err: type: string out: type: string pid: type: integer format: int64 title: CommandDetails CommandStatus: type: object properties: details: $ref: '#/definitions/CommandDetails' duration: type: number format: float finishedat: type: string example: 'yyyy-MM-dd HH:mm:ss.SSSSSS' startedat: type: string example: 'yyyy-MM-dd HH:mm:ss.SSSSSS' status: type: string title: CommandStatus ProcessHandle: type: object properties: alive: type: boolean title: ProcessHandle ProcessInfo: type: object properties: arguments: type: array items: type: string children: type: array items: $ref: '#/definitions/ProcessHandle' name: type: string parent: type: integer format: int64 pid: type: integer format: int64 status: type: string username: type: string title: ProcessInfo '''
swagger_file_content = "\nswagger: '2.0'\ninfo:\n description: Estuary agent will run your shell commands via REST API\n version: 4.4.0\n title: estuary-agent\n contact:\n name: Catalin Dinuta\n url: 'https://github.com/dinuta'\n email: constantin.dinuta@gmail.com\n license:\n name: Apache 2.0\n url: 'http://www.apache.org/licenses/LICENSE-2.0.html'\nhost: 'localhost:8080'\nbasePath: /\ntags:\n - name: estuary-agent\n description: root\npaths:\n /about:\n get:\n tags:\n - estuary-agent\n summary: Information about the application\n operationId: aboutGet\n produces:\n - application/json\n parameters:\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: Prints the name and version of the application.\n schema:\n $ref: '#/definitions/ApiResponse'\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n /command:\n post:\n tags:\n - estuary-agent\n summary: Starts multiple commands in blocking mode sequentially. Set the client timeout at needed value.\n operationId: commandPost_1\n consumes:\n - application/json\n - application/x-www-form-urlencoded\n - text/plain\n produces:\n - application/json\n parameters:\n - in: body\n name: commands\n description: Commands to run. E.g. ls -lrt\n required: true\n schema:\n type: string\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: Commands start success\n schema:\n $ref: '#/definitions/ApiResponse'\n '201':\n description: Created\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n '500':\n description: Commands start failure\n schema:\n $ref: '#/definitions/ApiResponse'\n /commanddetached:\n get:\n tags:\n - estuary-agent\n summary: Gets information about the last command started in detached mode\n operationId: commandDetachedGet\n produces:\n - application/json\n parameters:\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: Get command detached info success\n schema:\n $ref: '#/definitions/ApiResponse'\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n '500':\n description: Get command detached info failure\n schema:\n $ref: '#/definitions/ApiResponse'\n delete:\n tags:\n - estuary-agent\n summary: Stops all commands that were previously started in detached mode\n operationId: commandDetachedDelete\n produces:\n - application/json\n parameters:\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: command detached stop success\n schema:\n $ref: '#/definitions/ApiResponse'\n '204':\n description: No Content\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '500':\n description: command detached stop failure\n schema:\n $ref: '#/definitions/ApiResponse'\n '/commanddetached/{id}':\n get:\n tags:\n - estuary-agent\n summary: Gets information about the command identified by id started in detached mode\n operationId: commandDetachedIdGet\n produces:\n - application/json\n parameters:\n - name: id\n in: path\n description: Command detached id set by the user\n required: true\n type: string\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: Get command detached info success\n schema:\n $ref: '#/definitions/ApiResponse'\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n '500':\n description: Get command detached info failure\n schema:\n $ref: '#/definitions/ApiResponse'\n post:\n tags:\n - estuary-agent\n summary: Starts the shell commands in detached mode and sequentially\n operationId: commandDetachedIdPost\n consumes:\n - application/json\n - application/x-www-form-urlencoded\n - text/plain\n produces:\n - application/json\n parameters:\n - in: body\n name: commandContent\n description: List of commands to run one after the other. E.g. make/mvn/sh/npm\n required: true\n schema:\n type: string\n - name: id\n in: path\n description: Command detached id set by the user\n required: true\n type: string\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: Commands start success\n schema:\n $ref: '#/definitions/ApiResponse'\n '201':\n description: Created\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n '500':\n description: Commands start failure\n schema:\n $ref: '#/definitions/ApiResponse'\n delete:\n tags:\n - estuary-agent\n summary: Deletes the associated processes of the shell commands in detached mode\n operationId: commandDetachedIdDelete\n produces:\n - application/json\n parameters:\n - name: id\n in: path\n description: Command detached id set by the user\n required: true\n type: string\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: Command delete success\n schema:\n $ref: '#/definitions/ApiResponse'\n '204':\n description: No Content\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '500':\n description: Command delete failure\n schema:\n $ref: '#/definitions/ApiResponse'\n '/commanddetachedyaml/{id}':\n post:\n tags:\n - estuary-agent\n summary: Starts the commands in detached mode and sequentially. The commands are described by yaml.\n operationId: commandDetachedIdPostYaml\n consumes:\n - application/json\n - application/x-www-form-urlencoded\n - text/plain\n produces:\n - application/json\n parameters:\n - in: body\n name: commandContent\n description: List of commands to run one after the other in yaml format.\n required: true\n schema:\n type: string\n - name: id\n in: path\n description: Command detached id set by the user\n required: true\n type: string\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: Commands start success\n schema:\n $ref: '#/definitions/ApiResponse'\n '201':\n description: Created\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n '500':\n description: Commands start failure\n schema:\n $ref: '#/definitions/ApiResponse'\n /commandparallel:\n post:\n tags:\n - estuary-agent\n summary: Starts multiple commands in blocking mode parallel. Set the client timeout at needed value.\n operationId: commandPost_2\n consumes:\n - application/json\n - application/x-www-form-urlencoded\n - text/plain\n produces:\n - application/json\n parameters:\n - in: body\n name: commands\n description: Commands to run. E.g. ls -lrt\n required: true\n schema:\n type: string\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: commands start success\n schema:\n $ref: '#/definitions/ApiResponse'\n '201':\n description: Created\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n '500':\n description: commands start failure\n schema:\n $ref: '#/definitions/ApiResponse'\n /commandyaml:\n post:\n tags:\n - estuary-agent\n summary: Starts multiple commands in blocking mode sequentially. The commands are described in yaml format. Set the client timeout at needed value.\n operationId: commandPost\n consumes:\n - application/json\n - application/x-www-form-urlencoded\n - text/plain\n produces:\n - application/json\n parameters:\n - in: body\n name: commands\n description: Commands to run in yaml format\n required: true\n schema:\n type: string\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: Commands start success\n schema:\n $ref: '#/definitions/ApiResponse'\n '201':\n description: Created\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n '500':\n description: Commands start failure\n schema:\n $ref: '#/definitions/ApiResponse'\n /env:\n get:\n tags:\n - estuary-agent\n summary: Print all environment variables\n operationId: envGet\n produces:\n - application/json\n parameters:\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: List of the entire environment variables\n schema:\n $ref: '#/definitions/ApiResponse'\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n post:\n tags:\n - estuary-agent\n summary: Set environment variables\n operationId: envPost\n consumes:\n - application/json\n produces:\n - application/json\n parameters:\n - in: body\n name: envVars\n description: List of env vars by key-value pair in JSON format\n required: true\n schema:\n type: string\n - name: Token\n in: header\n description: Authentication Token\n required: false\n type: string\n responses:\n '200':\n description: Set environment variables success\n schema:\n $ref: '#/definitions/ApiResponse'\n '201':\n description: Created\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n '500':\n description: Set environment variables failure\n schema:\n $ref: '#/definitions/ApiResponse'\n delete:\n tags:\n - estuary-agent\n summary: Deletes the custom defined env vars contained in the virtual environment\n operationId: envDelete\n produces:\n - application/json\n parameters:\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: 'Deletes the entire virtual env vars, but keeping system env vars.'\n schema:\n $ref: '#/definitions/ApiResponse'\n '204':\n description: No Content\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '/env/{env_name}':\n get:\n tags:\n - estuary-agent\n summary: Gets the environment variable value from the environment\n operationId: envEnvNameGet\n produces:\n - application/json\n parameters:\n - name: env_name\n in: path\n description: The name of the env var to get value from\n required: true\n type: string\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: Get env var success\n schema:\n $ref: '#/definitions/ApiResponse'\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n '500':\n description: Get env var failure\n schema:\n $ref: '#/definitions/ApiResponse'\n /file:\n get:\n tags:\n - estuary-agent\n summary: Gets the content of the file\n operationId: fileGet\n consumes:\n - application/octet-stream\n - text/plain\n produces:\n - application/json\n - application/zip\n parameters:\n - name: File-Path\n in: header\n description: Target file path to get\n required: false\n type: string\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: 'The content of the file in plain text, success'\n schema:\n $ref: '#/definitions/ApiResponse'\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n '500':\n description: 'Failure, the file content could not be read'\n schema:\n $ref: '#/definitions/ApiResponse'\n post:\n tags:\n - estuary-agent\n summary: Uploads a file no mater the format. Binary or raw\n operationId: filePost\n consumes:\n - application/octet-stream\n - application/json\n - application/x-www-form-urlencoded\n - text/plain\n produces:\n - application/json\n - text/plain\n parameters:\n - in: body\n name: content\n description: The content of the file\n required: false\n schema:\n type: string\n format: byte\n - name: File-Path\n in: header\n description: File-Path\n required: true\n type: string\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: The content of the file was uploaded successfully\n schema:\n $ref: '#/definitions/ApiResponse'\n '201':\n description: Created\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n '500':\n description: 'Failure, the file content could not be uploaded'\n schema:\n $ref: '#/definitions/ApiResponse'\n put:\n tags:\n - estuary-agent\n summary: Uploads a file no mater the format. Binary or raw\n operationId: filePut\n consumes:\n - application/octet-stream\n - application/json\n - application/x-www-form-urlencoded\n - text/plain\n produces:\n - application/json\n - text/plain\n parameters:\n - in: body\n name: content\n description: The content of the file\n required: false\n schema:\n type: string\n format: byte\n - name: File-Path\n in: header\n description: File-Path\n required: true\n type: string\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: The content of the file was uploaded successfully\n schema:\n $ref: '#/definitions/ApiResponse'\n '201':\n description: Created\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n '500':\n description: 'Failure, the file content could not be uploaded'\n schema:\n $ref: '#/definitions/ApiResponse'\n /folder:\n get:\n tags:\n - estuary-agent\n summary: Gets the folder as zip archive. Useful to get test results folder\n operationId: folderGet\n produces:\n - application/json\n - application/zip\n parameters:\n - name: Folder-Path\n in: header\n description: Target folder path to get as zip\n required: false\n type: string\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: The content of the folder as zip archive\n schema:\n $ref: '#/definitions/ApiResponse'\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\n '500':\n description: The content of the folder could not be obtained\n schema:\n $ref: '#/definitions/ApiResponse'\n /ping:\n get:\n tags:\n - estuary-agent\n summary: Ping endpoint which replies with pong\n operationId: pingGet\n produces:\n - application/json\n parameters:\n - name: Token\n in: header\n description: Token\n required: false\n type: string\n responses:\n '200':\n description: Ping endpoint which replies with pong. Useful when checking the alive status of the service\n schema:\n $ref: '#/definitions/ApiResponse'\n '401':\n description: Unauthorized\n '403':\n description: Forbidden\n '404':\n description: Not Found\ndefinitions:\n ApiResponse:\n type: object\n properties:\n code:\n type: integer\n format: int32\n description:\n type: object\n message:\n type: string\n name:\n type: string\n path:\n type: string\n timestamp:\n type: string\n version:\n type: string\n title: ApiResponse\n ApiResponseCommandDescription:\n type: object\n properties:\n code:\n type: integer\n format: int32\n description:\n $ref: '#/definitions/CommandDescription'\n message:\n type: string\n name:\n type: string\n path:\n type: string\n timestamp:\n type: string\n version:\n type: string\n title: ApiResponseCommandDescription\n CommandDescription:\n type: object\n properties:\n commands:\n type: object\n additionalProperties:\n $ref: '#/definitions/CommandStatus'\n duration:\n type: number\n format: float\n finished:\n type: boolean\n finishedat:\n type: string\n id:\n type: string\n pid:\n type: integer\n format: int64\n processes:\n type: array\n items:\n $ref: '#/definitions/ProcessInfo'\n started:\n type: boolean\n startedat:\n type: string\n title: CommandDescription\n CommandDetails:\n type: object\n properties:\n args:\n type: array\n items:\n type: string\n code:\n type: integer\n format: int64\n err:\n type: string\n out:\n type: string\n pid:\n type: integer\n format: int64\n title: CommandDetails\n CommandStatus:\n type: object\n properties:\n details:\n $ref: '#/definitions/CommandDetails'\n duration:\n type: number\n format: float\n finishedat:\n type: string\n example: 'yyyy-MM-dd HH:mm:ss.SSSSSS'\n startedat:\n type: string\n example: 'yyyy-MM-dd HH:mm:ss.SSSSSS'\n status:\n type: string\n title: CommandStatus\n ProcessHandle:\n type: object\n properties:\n alive:\n type: boolean\n title: ProcessHandle\n ProcessInfo:\n type: object\n properties:\n arguments:\n type: array\n items:\n type: string\n children:\n type: array\n items:\n $ref: '#/definitions/ProcessHandle'\n name:\n type: string\n parent:\n type: integer\n format: int64\n pid:\n type: integer\n format: int64\n status:\n type: string\n username:\n type: string\n title: ProcessInfo\n"
ip=input().split() key=input() n='' for i in ip: for j in i: if j in key: n+=j l=list(key) s=[[]] for i in range(len(l)+1): for j in range(i+1,len(l)+1): s.append(l[i:j]) print(s)
ip = input().split() key = input() n = '' for i in ip: for j in i: if j in key: n += j l = list(key) s = [[]] for i in range(len(l) + 1): for j in range(i + 1, len(l) + 1): s.append(l[i:j]) print(s)
# Copyright 2021 IBM All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Code system and values for ACD Scoring method See example at: https://alvearie.io/alvearie-fhir-ig/Condition-ConditionInstanceWithNLPInsights.json.html """ SCORING_METHOD_ACD_CODE_SYSTEM = ( "http://ibm.com/fhir/cdm/CodeSystem/1.0/acd-confidence-method" ) # Values for acd scoring method DIAGNOSIS_EXPLICIT_SCORE = "Diagnosis_Explicit_Score" DIAGNOSIS_PATIENT_REPORTED_SCORE = "Diagnosis_Patient_Reported_Score" MEDICATION_TAKEN_SCORE = "Medication_Taken_Score" ADVERSE_EVENT_SCORE = "Adverse_Event_Score"
"""Code system and values for ACD Scoring method See example at: https://alvearie.io/alvearie-fhir-ig/Condition-ConditionInstanceWithNLPInsights.json.html """ scoring_method_acd_code_system = 'http://ibm.com/fhir/cdm/CodeSystem/1.0/acd-confidence-method' diagnosis_explicit_score = 'Diagnosis_Explicit_Score' diagnosis_patient_reported_score = 'Diagnosis_Patient_Reported_Score' medication_taken_score = 'Medication_Taken_Score' adverse_event_score = 'Adverse_Event_Score'
''' The MIT License (MIT) Copyright (c) 2016 WavyCloud Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' def add_tags(ResourceId=None, TagsList=None): """ Adds one or more tags to a trail, up to a limit of 50. Tags must be unique per trail. Overwrites an existing tag's value when a new value is specified for an existing tag key. If you specify a key without a value, the tag will be created with the specified key and a value of null. You can tag a trail that applies to all regions only from the region in which the trail was created (that is, from its home region). See also: AWS API Documentation :example: response = client.add_tags( ResourceId='string', TagsList=[ { 'Key': 'string', 'Value': 'string' }, ] ) :type ResourceId: string :param ResourceId: [REQUIRED] Specifies the ARN of the trail to which one or more tags will be added. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :type TagsList: list :param TagsList: Contains a list of CloudTrail tags, up to a limit of 50 (dict) --A custom key-value pair associated with a resource such as a CloudTrail trail. Key (string) -- [REQUIRED]The key in a key-value pair. The key must be must be no longer than 128 Unicode characters. The key must be unique for the resource to which it applies. Value (string) --The value in a key-value pair of a tag. The value must be no longer than 256 Unicode characters. :rtype: dict :return: {} """ pass def can_paginate(operation_name=None): """ Check if an operation can be paginated. :type operation_name: string :param operation_name: The operation name. This is the same name as the method name on the client. For example, if the method name is create_foo, and you'd normally invoke the operation as client.create_foo(**kwargs), if the create_foo operation can be paginated, you can use the call client.get_paginator('create_foo'). """ pass def create_trail(Name=None, S3BucketName=None, S3KeyPrefix=None, SnsTopicName=None, IncludeGlobalServiceEvents=None, IsMultiRegionTrail=None, EnableLogFileValidation=None, CloudWatchLogsLogGroupArn=None, CloudWatchLogsRoleArn=None, KmsKeyId=None): """ Creates a trail that specifies the settings for delivery of log data to an Amazon S3 bucket. A maximum of five trails can exist in a region, irrespective of the region in which they were created. See also: AWS API Documentation :example: response = client.create_trail( Name='string', S3BucketName='string', S3KeyPrefix='string', SnsTopicName='string', IncludeGlobalServiceEvents=True|False, IsMultiRegionTrail=True|False, EnableLogFileValidation=True|False, CloudWatchLogsLogGroupArn='string', CloudWatchLogsRoleArn='string', KmsKeyId='string' ) :type Name: string :param Name: [REQUIRED] Specifies the name of the trail. The name must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid. Not be in IP address format (for example, 192.168.5.4) :type S3BucketName: string :param S3BucketName: [REQUIRED] Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements . :type S3KeyPrefix: string :param S3KeyPrefix: Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files . The maximum length is 200 characters. :type SnsTopicName: string :param SnsTopicName: Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters. :type IncludeGlobalServiceEvents: boolean :param IncludeGlobalServiceEvents: Specifies whether the trail is publishing events from global services such as IAM to the log files. :type IsMultiRegionTrail: boolean :param IsMultiRegionTrail: Specifies whether the trail is created in the current region or in all regions. The default is false. :type EnableLogFileValidation: boolean :param EnableLogFileValidation: Specifies whether log file integrity validation is enabled. The default is false. Note When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail will not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on January 10. The same applies whenever you stop CloudTrail logging or delete a trail. :type CloudWatchLogsLogGroupArn: string :param CloudWatchLogsLogGroupArn: Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn. :type CloudWatchLogsRoleArn: string :param CloudWatchLogsRoleArn: Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group. :type KmsKeyId: string :param KmsKeyId: Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The value can be an alias name prefixed by 'alias/', a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier. Examples: alias/MyAliasName arn:aws:kms:us-east-1:123456789012:alias/MyAliasName arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 12345678-1234-1234-1234-123456789012 :rtype: dict :return: { 'Name': 'string', 'S3BucketName': 'string', 'S3KeyPrefix': 'string', 'SnsTopicName': 'string', 'SnsTopicARN': 'string', 'IncludeGlobalServiceEvents': True|False, 'IsMultiRegionTrail': True|False, 'TrailARN': 'string', 'LogFileValidationEnabled': True|False, 'CloudWatchLogsLogGroupArn': 'string', 'CloudWatchLogsRoleArn': 'string', 'KmsKeyId': 'string' } """ pass def delete_trail(Name=None): """ Deletes a trail. This operation must be called from the region in which the trail was created. DeleteTrail cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions. See also: AWS API Documentation :example: response = client.delete_trail( Name='string' ) :type Name: string :param Name: [REQUIRED] Specifies the name or the CloudTrail ARN of the trail to be deleted. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :rtype: dict :return: {} """ pass def describe_trails(trailNameList=None, includeShadowTrails=None): """ Retrieves settings for the trail associated with the current region for your account. See also: AWS API Documentation :example: response = client.describe_trails( trailNameList=[ 'string', ], includeShadowTrails=True|False ) :type trailNameList: list :param trailNameList: Specifies a list of trail names, trail ARNs, or both, of the trails to describe. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail If an empty list is specified, information for the trail in the current region is returned. If an empty list is specified and IncludeShadowTrails is false, then information for all trails in the current region is returned. If an empty list is specified and IncludeShadowTrails is null or true, then information for all trails in the current region and any associated shadow trails in other regions is returned. Note If one or more trail names are specified, information is returned only if the names match the names of trails belonging only to the current region. To return information about a trail in another region, you must specify its trail ARN. (string) -- :type includeShadowTrails: boolean :param includeShadowTrails: Specifies whether to include shadow trails in the response. A shadow trail is the replication in a region of a trail that was created in a different region. The default is true. :rtype: dict :return: { 'trailList': [ { 'Name': 'string', 'S3BucketName': 'string', 'S3KeyPrefix': 'string', 'SnsTopicName': 'string', 'SnsTopicARN': 'string', 'IncludeGlobalServiceEvents': True|False, 'IsMultiRegionTrail': True|False, 'HomeRegion': 'string', 'TrailARN': 'string', 'LogFileValidationEnabled': True|False, 'CloudWatchLogsLogGroupArn': 'string', 'CloudWatchLogsRoleArn': 'string', 'KmsKeyId': 'string', 'HasCustomEventSelectors': True|False }, ] } """ pass def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None): """ Generate a presigned url given a client, its method, and arguments :type ClientMethod: string :param ClientMethod: The client method to presign for :type Params: dict :param Params: The parameters normally passed to ClientMethod. :type ExpiresIn: int :param ExpiresIn: The number of seconds the presigned url is valid for. By default it expires in an hour (3600 seconds) :type HttpMethod: string :param HttpMethod: The http method to use on the generated url. By default, the http method is whatever is used in the method's model. """ pass def get_event_selectors(TrailName=None): """ Describes the settings for the event selectors that you configured for your trail. The information returned for your event selectors includes the following: For more information, see Logging Data and Management Events for Trails in the AWS CloudTrail User Guide . See also: AWS API Documentation :example: response = client.get_event_selectors( TrailName='string' ) :type TrailName: string :param TrailName: [REQUIRED] Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid. Not be in IP address format (for example, 192.168.5.4) If you specify a trail ARN, it must be in the format: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :rtype: dict :return: { 'TrailARN': 'string', 'EventSelectors': [ { 'ReadWriteType': 'ReadOnly'|'WriteOnly'|'All', 'IncludeManagementEvents': True|False, 'DataResources': [ { 'Type': 'string', 'Values': [ 'string', ] }, ] }, ] } :returns: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid. Not be in IP address format (for example, 192.168.5.4) """ pass def get_paginator(operation_name=None): """ Create a paginator for an operation. :type operation_name: string :param operation_name: The operation name. This is the same name as the method name on the client. For example, if the method name is create_foo, and you'd normally invoke the operation as client.create_foo(**kwargs), if the create_foo operation can be paginated, you can use the call client.get_paginator('create_foo'). :rtype: L{botocore.paginate.Paginator} """ pass def get_trail_status(Name=None): """ Returns a JSON-formatted list of information about the specified trail. Fields include information on delivery errors, Amazon SNS and Amazon S3 errors, and start and stop logging times for each trail. This operation returns trail status from a single region. To return trail status from all regions, you must call the operation on each region. See also: AWS API Documentation :example: response = client.get_trail_status( Name='string' ) :type Name: string :param Name: [REQUIRED] Specifies the name or the CloudTrail ARN of the trail for which you are requesting status. To get the status of a shadow trail (a replication of the trail in another region), you must specify its ARN. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :rtype: dict :return: { 'IsLogging': True|False, 'LatestDeliveryError': 'string', 'LatestNotificationError': 'string', 'LatestDeliveryTime': datetime(2015, 1, 1), 'LatestNotificationTime': datetime(2015, 1, 1), 'StartLoggingTime': datetime(2015, 1, 1), 'StopLoggingTime': datetime(2015, 1, 1), 'LatestCloudWatchLogsDeliveryError': 'string', 'LatestCloudWatchLogsDeliveryTime': datetime(2015, 1, 1), 'LatestDigestDeliveryTime': datetime(2015, 1, 1), 'LatestDigestDeliveryError': 'string', 'LatestDeliveryAttemptTime': 'string', 'LatestNotificationAttemptTime': 'string', 'LatestNotificationAttemptSucceeded': 'string', 'LatestDeliveryAttemptSucceeded': 'string', 'TimeLoggingStarted': 'string', 'TimeLoggingStopped': 'string' } """ pass def get_waiter(): """ """ pass def list_public_keys(StartTime=None, EndTime=None, NextToken=None): """ Returns all public keys whose private keys were used to sign the digest files within the specified time range. The public key is needed to validate digest files that were signed with its corresponding private key. See also: AWS API Documentation :example: response = client.list_public_keys( StartTime=datetime(2015, 1, 1), EndTime=datetime(2015, 1, 1), NextToken='string' ) :type StartTime: datetime :param StartTime: Optionally specifies, in UTC, the start of the time range to look up public keys for CloudTrail digest files. If not specified, the current time is used, and the current public key is returned. :type EndTime: datetime :param EndTime: Optionally specifies, in UTC, the end of the time range to look up public keys for CloudTrail digest files. If not specified, the current time is used. :type NextToken: string :param NextToken: Reserved for future use. :rtype: dict :return: { 'PublicKeyList': [ { 'Value': b'bytes', 'ValidityStartTime': datetime(2015, 1, 1), 'ValidityEndTime': datetime(2015, 1, 1), 'Fingerprint': 'string' }, ], 'NextToken': 'string' } """ pass def list_tags(ResourceIdList=None, NextToken=None): """ Lists the tags for the trail in the current region. See also: AWS API Documentation :example: response = client.list_tags( ResourceIdList=[ 'string', ], NextToken='string' ) :type ResourceIdList: list :param ResourceIdList: [REQUIRED] Specifies a list of trail ARNs whose tags will be listed. The list has a limit of 20 ARNs. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail (string) -- :type NextToken: string :param NextToken: Reserved for future use. :rtype: dict :return: { 'ResourceTagList': [ { 'ResourceId': 'string', 'TagsList': [ { 'Key': 'string', 'Value': 'string' }, ] }, ], 'NextToken': 'string' } """ pass def lookup_events(LookupAttributes=None, StartTime=None, EndTime=None, MaxResults=None, NextToken=None): """ Looks up API activity events captured by CloudTrail that create, update, or delete resources in your account. Events for a region can be looked up for the times in which you had CloudTrail turned on in that region during the last seven days. Lookup supports the following attributes: All attributes are optional. The default number of results returned is 10, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results. See also: AWS API Documentation :example: response = client.lookup_events( LookupAttributes=[ { 'AttributeKey': 'EventId'|'EventName'|'Username'|'ResourceType'|'ResourceName'|'EventSource', 'AttributeValue': 'string' }, ], StartTime=datetime(2015, 1, 1), EndTime=datetime(2015, 1, 1), MaxResults=123, NextToken='string' ) :type LookupAttributes: list :param LookupAttributes: Contains a list of lookup attributes. Currently the list can contain only one item. (dict) --Specifies an attribute and value that filter the events returned. AttributeKey (string) -- [REQUIRED]Specifies an attribute on which to filter the events returned. AttributeValue (string) -- [REQUIRED]Specifies a value for the specified AttributeKey. :type StartTime: datetime :param StartTime: Specifies that only events that occur after or at the specified time are returned. If the specified start time is after the specified end time, an error is returned. :type EndTime: datetime :param EndTime: Specifies that only events that occur before or at the specified time are returned. If the specified end time is before the specified start time, an error is returned. :type MaxResults: integer :param MaxResults: The number of events to return. Possible values are 1 through 50. The default is 10. :type NextToken: string :param NextToken: The token to use to get the next page of results after a previous API call. This token must be passed in with the same parameters that were specified in the the original call. For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters. :rtype: dict :return: { 'Events': [ { 'EventId': 'string', 'EventName': 'string', 'EventTime': datetime(2015, 1, 1), 'EventSource': 'string', 'Username': 'string', 'Resources': [ { 'ResourceType': 'string', 'ResourceName': 'string' }, ], 'CloudTrailEvent': 'string' }, ], 'NextToken': 'string' } :returns: LookupAttributes (list) -- Contains a list of lookup attributes. Currently the list can contain only one item. (dict) --Specifies an attribute and value that filter the events returned. AttributeKey (string) -- [REQUIRED]Specifies an attribute on which to filter the events returned. AttributeValue (string) -- [REQUIRED]Specifies a value for the specified AttributeKey. StartTime (datetime) -- Specifies that only events that occur after or at the specified time are returned. If the specified start time is after the specified end time, an error is returned. EndTime (datetime) -- Specifies that only events that occur before or at the specified time are returned. If the specified end time is before the specified start time, an error is returned. MaxResults (integer) -- The number of events to return. Possible values are 1 through 50. The default is 10. NextToken (string) -- The token to use to get the next page of results after a previous API call. This token must be passed in with the same parameters that were specified in the the original call. For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters. """ pass def put_event_selectors(TrailName=None, EventSelectors=None): """ Configures an event selector for your trail. Use event selectors to specify whether you want your trail to log management and/or data events. When an event occurs in your account, CloudTrail evaluates the event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. Example The PutEventSelectors operation must be called from the region in which the trail was created; otherwise, an InvalidHomeRegionException is thrown. You can configure up to five event selectors for each trail. For more information, see Logging Data and Management Events for Trails in the AWS CloudTrail User Guide . See also: AWS API Documentation :example: response = client.put_event_selectors( TrailName='string', EventSelectors=[ { 'ReadWriteType': 'ReadOnly'|'WriteOnly'|'All', 'IncludeManagementEvents': True|False, 'DataResources': [ { 'Type': 'string', 'Values': [ 'string', ] }, ] }, ] ) :type TrailName: string :param TrailName: [REQUIRED] Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid. Not be in IP address format (for example, 192.168.5.4) If you specify a trail ARN, it must be in the format: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :type EventSelectors: list :param EventSelectors: [REQUIRED] Specifies the settings for your event selectors. You can configure up to five event selectors for a trail. (dict) --Use event selectors to specify whether you want your trail to log management and/or data events. When an event occurs in your account, CloudTrail evaluates the event selector for all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. You can configure up to five event selectors for a trail. ReadWriteType (string) --Specify if you want your trail to log read-only events, write-only events, or all. For example, the EC2 GetConsoleOutput is a read-only API operation and RunInstances is a write-only API operation. By default, the value is All . IncludeManagementEvents (boolean) --Specify if you want your event selector to include management events for your trail. For more information, see Management Events in the AWS CloudTrail User Guide . By default, the value is true . DataResources (list) --CloudTrail supports logging only data events for S3 objects. You can specify up to 250 S3 buckets and object prefixes for a trail. For more information, see Data Events in the AWS CloudTrail User Guide . (dict) --The Amazon S3 objects that you specify in your event selectors for your trail to log data events. Data events are object-level API operations that access S3 objects, such as GetObject , DeleteObject , and PutObject . You can specify up to 250 S3 buckets and object prefixes for a trail. Example You create an event selector for a trail and specify an S3 bucket and an empty prefix, such as arn:aws:s3:::bucket-1/ . You upload an image file to bucket-1 . The PutObject API operation occurs on an object in the S3 bucket that you specified in the event selector. The trail processes and logs the event. You upload another image file to a different S3 bucket named arn:aws:s3:::bucket-2 . The event occurs on an object in an S3 bucket that you didn't specify in the event selector. The trail doesn t log the event. Type (string) --The resource type in which you want to log data events. You can specify only the following value: AWS::S3::Object . Values (list) --A list of ARN-like strings for the specified S3 objects. To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::bucket-1/ . The trail logs data events for all objects in this S3 bucket. To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::bucket-1/example-images . The trail logs data events for objects in this S3 bucket that match the prefix. (string) -- :rtype: dict :return: { 'TrailARN': 'string', 'EventSelectors': [ { 'ReadWriteType': 'ReadOnly'|'WriteOnly'|'All', 'IncludeManagementEvents': True|False, 'DataResources': [ { 'Type': 'string', 'Values': [ 'string', ] }, ] }, ] } :returns: TrailName (string) -- [REQUIRED] Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid. Not be in IP address format (for example, 192.168.5.4) If you specify a trail ARN, it must be in the format: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail EventSelectors (list) -- [REQUIRED] Specifies the settings for your event selectors. You can configure up to five event selectors for a trail. (dict) --Use event selectors to specify whether you want your trail to log management and/or data events. When an event occurs in your account, CloudTrail evaluates the event selector for all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. You can configure up to five event selectors for a trail. ReadWriteType (string) --Specify if you want your trail to log read-only events, write-only events, or all. For example, the EC2 GetConsoleOutput is a read-only API operation and RunInstances is a write-only API operation. By default, the value is All . IncludeManagementEvents (boolean) --Specify if you want your event selector to include management events for your trail. For more information, see Management Events in the AWS CloudTrail User Guide . By default, the value is true . DataResources (list) --CloudTrail supports logging only data events for S3 objects. You can specify up to 250 S3 buckets and object prefixes for a trail. For more information, see Data Events in the AWS CloudTrail User Guide . (dict) --The Amazon S3 objects that you specify in your event selectors for your trail to log data events. Data events are object-level API operations that access S3 objects, such as GetObject , DeleteObject , and PutObject . You can specify up to 250 S3 buckets and object prefixes for a trail. Example You create an event selector for a trail and specify an S3 bucket and an empty prefix, such as arn:aws:s3:::bucket-1/ . You upload an image file to bucket-1 . The PutObject API operation occurs on an object in the S3 bucket that you specified in the event selector. The trail processes and logs the event. You upload another image file to a different S3 bucket named arn:aws:s3:::bucket-2 . The event occurs on an object in an S3 bucket that you didn't specify in the event selector. The trail doesnt log the event. Type (string) --The resource type in which you want to log data events. You can specify only the following value: AWS::S3::Object . Values (list) --A list of ARN-like strings for the specified S3 objects. To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::bucket-1/ . The trail logs data events for all objects in this S3 bucket. To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::bucket-1/example-images . The trail logs data events for objects in this S3 bucket that match the prefix. (string) -- """ pass def remove_tags(ResourceId=None, TagsList=None): """ Removes the specified tags from a trail. See also: AWS API Documentation :example: response = client.remove_tags( ResourceId='string', TagsList=[ { 'Key': 'string', 'Value': 'string' }, ] ) :type ResourceId: string :param ResourceId: [REQUIRED] Specifies the ARN of the trail from which tags should be removed. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :type TagsList: list :param TagsList: Specifies a list of tags to be removed. (dict) --A custom key-value pair associated with a resource such as a CloudTrail trail. Key (string) -- [REQUIRED]The key in a key-value pair. The key must be must be no longer than 128 Unicode characters. The key must be unique for the resource to which it applies. Value (string) --The value in a key-value pair of a tag. The value must be no longer than 256 Unicode characters. :rtype: dict :return: {} """ pass def start_logging(Name=None): """ Starts the recording of AWS API calls and log file delivery for a trail. For a trail that is enabled in all regions, this operation must be called from the region in which the trail was created. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions. See also: AWS API Documentation :example: response = client.start_logging( Name='string' ) :type Name: string :param Name: [REQUIRED] Specifies the name or the CloudTrail ARN of the trail for which CloudTrail logs AWS API calls. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :rtype: dict :return: {} """ pass def stop_logging(Name=None): """ Suspends the recording of AWS API calls and log file delivery for the specified trail. Under most circumstances, there is no need to use this action. You can update a trail without stopping it first. This action is the only way to stop recording. For a trail enabled in all regions, this operation must be called from the region in which the trail was created, or an InvalidHomeRegionException will occur. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail enabled in all regions. See also: AWS API Documentation :example: response = client.stop_logging( Name='string' ) :type Name: string :param Name: [REQUIRED] Specifies the name or the CloudTrail ARN of the trail for which CloudTrail will stop logging AWS API calls. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :rtype: dict :return: {} """ pass def update_trail(Name=None, S3BucketName=None, S3KeyPrefix=None, SnsTopicName=None, IncludeGlobalServiceEvents=None, IsMultiRegionTrail=None, EnableLogFileValidation=None, CloudWatchLogsLogGroupArn=None, CloudWatchLogsRoleArn=None, KmsKeyId=None): """ Updates the settings that specify delivery of log files. Changes to a trail do not require stopping the CloudTrail service. Use this action to designate an existing bucket for log delivery. If the existing bucket has previously been a target for CloudTrail log files, an IAM policy exists for the bucket. UpdateTrail must be called from the region in which the trail was created; otherwise, an InvalidHomeRegionException is thrown. See also: AWS API Documentation :example: response = client.update_trail( Name='string', S3BucketName='string', S3KeyPrefix='string', SnsTopicName='string', IncludeGlobalServiceEvents=True|False, IsMultiRegionTrail=True|False, EnableLogFileValidation=True|False, CloudWatchLogsLogGroupArn='string', CloudWatchLogsRoleArn='string', KmsKeyId='string' ) :type Name: string :param Name: [REQUIRED] Specifies the name of the trail or trail ARN. If Name is a trail name, the string must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid. Not be in IP address format (for example, 192.168.5.4) If Name is a trail ARN, it must be in the format: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :type S3BucketName: string :param S3BucketName: Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements . :type S3KeyPrefix: string :param S3KeyPrefix: Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files . The maximum length is 200 characters. :type SnsTopicName: string :param SnsTopicName: Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters. :type IncludeGlobalServiceEvents: boolean :param IncludeGlobalServiceEvents: Specifies whether the trail is publishing events from global services such as IAM to the log files. :type IsMultiRegionTrail: boolean :param IsMultiRegionTrail: Specifies whether the trail applies only to the current region or to all regions. The default is false. If the trail exists only in the current region and this value is set to true, shadow trails (replications of the trail) will be created in the other regions. If the trail exists in all regions and this value is set to false, the trail will remain in the region where it was created, and its shadow trails in other regions will be deleted. :type EnableLogFileValidation: boolean :param EnableLogFileValidation: Specifies whether log file validation is enabled. The default is false. Note When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail will not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on January 10. The same applies whenever you stop CloudTrail logging or delete a trail. :type CloudWatchLogsLogGroupArn: string :param CloudWatchLogsLogGroupArn: Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn. :type CloudWatchLogsRoleArn: string :param CloudWatchLogsRoleArn: Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group. :type KmsKeyId: string :param KmsKeyId: Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The value can be an alias name prefixed by 'alias/', a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier. Examples: alias/MyAliasName arn:aws:kms:us-east-1:123456789012:alias/MyAliasName arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 12345678-1234-1234-1234-123456789012 :rtype: dict :return: { 'Name': 'string', 'S3BucketName': 'string', 'S3KeyPrefix': 'string', 'SnsTopicName': 'string', 'SnsTopicARN': 'string', 'IncludeGlobalServiceEvents': True|False, 'IsMultiRegionTrail': True|False, 'TrailARN': 'string', 'LogFileValidationEnabled': True|False, 'CloudWatchLogsLogGroupArn': 'string', 'CloudWatchLogsRoleArn': 'string', 'KmsKeyId': 'string' } """ pass
""" The MIT License (MIT) Copyright (c) 2016 WavyCloud Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ def add_tags(ResourceId=None, TagsList=None): """ Adds one or more tags to a trail, up to a limit of 50. Tags must be unique per trail. Overwrites an existing tag's value when a new value is specified for an existing tag key. If you specify a key without a value, the tag will be created with the specified key and a value of null. You can tag a trail that applies to all regions only from the region in which the trail was created (that is, from its home region). See also: AWS API Documentation :example: response = client.add_tags( ResourceId='string', TagsList=[ { 'Key': 'string', 'Value': 'string' }, ] ) :type ResourceId: string :param ResourceId: [REQUIRED] Specifies the ARN of the trail to which one or more tags will be added. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :type TagsList: list :param TagsList: Contains a list of CloudTrail tags, up to a limit of 50 (dict) --A custom key-value pair associated with a resource such as a CloudTrail trail. Key (string) -- [REQUIRED]The key in a key-value pair. The key must be must be no longer than 128 Unicode characters. The key must be unique for the resource to which it applies. Value (string) --The value in a key-value pair of a tag. The value must be no longer than 256 Unicode characters. :rtype: dict :return: {} """ pass def can_paginate(operation_name=None): """ Check if an operation can be paginated. :type operation_name: string :param operation_name: The operation name. This is the same name as the method name on the client. For example, if the method name is create_foo, and you'd normally invoke the operation as client.create_foo(**kwargs), if the create_foo operation can be paginated, you can use the call client.get_paginator('create_foo'). """ pass def create_trail(Name=None, S3BucketName=None, S3KeyPrefix=None, SnsTopicName=None, IncludeGlobalServiceEvents=None, IsMultiRegionTrail=None, EnableLogFileValidation=None, CloudWatchLogsLogGroupArn=None, CloudWatchLogsRoleArn=None, KmsKeyId=None): """ Creates a trail that specifies the settings for delivery of log data to an Amazon S3 bucket. A maximum of five trails can exist in a region, irrespective of the region in which they were created. See also: AWS API Documentation :example: response = client.create_trail( Name='string', S3BucketName='string', S3KeyPrefix='string', SnsTopicName='string', IncludeGlobalServiceEvents=True|False, IsMultiRegionTrail=True|False, EnableLogFileValidation=True|False, CloudWatchLogsLogGroupArn='string', CloudWatchLogsRoleArn='string', KmsKeyId='string' ) :type Name: string :param Name: [REQUIRED] Specifies the name of the trail. The name must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid. Not be in IP address format (for example, 192.168.5.4) :type S3BucketName: string :param S3BucketName: [REQUIRED] Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements . :type S3KeyPrefix: string :param S3KeyPrefix: Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files . The maximum length is 200 characters. :type SnsTopicName: string :param SnsTopicName: Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters. :type IncludeGlobalServiceEvents: boolean :param IncludeGlobalServiceEvents: Specifies whether the trail is publishing events from global services such as IAM to the log files. :type IsMultiRegionTrail: boolean :param IsMultiRegionTrail: Specifies whether the trail is created in the current region or in all regions. The default is false. :type EnableLogFileValidation: boolean :param EnableLogFileValidation: Specifies whether log file integrity validation is enabled. The default is false. Note When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail will not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on January 10. The same applies whenever you stop CloudTrail logging or delete a trail. :type CloudWatchLogsLogGroupArn: string :param CloudWatchLogsLogGroupArn: Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn. :type CloudWatchLogsRoleArn: string :param CloudWatchLogsRoleArn: Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group. :type KmsKeyId: string :param KmsKeyId: Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The value can be an alias name prefixed by 'alias/', a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier. Examples: alias/MyAliasName arn:aws:kms:us-east-1:123456789012:alias/MyAliasName arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 12345678-1234-1234-1234-123456789012 :rtype: dict :return: { 'Name': 'string', 'S3BucketName': 'string', 'S3KeyPrefix': 'string', 'SnsTopicName': 'string', 'SnsTopicARN': 'string', 'IncludeGlobalServiceEvents': True|False, 'IsMultiRegionTrail': True|False, 'TrailARN': 'string', 'LogFileValidationEnabled': True|False, 'CloudWatchLogsLogGroupArn': 'string', 'CloudWatchLogsRoleArn': 'string', 'KmsKeyId': 'string' } """ pass def delete_trail(Name=None): """ Deletes a trail. This operation must be called from the region in which the trail was created. DeleteTrail cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions. See also: AWS API Documentation :example: response = client.delete_trail( Name='string' ) :type Name: string :param Name: [REQUIRED] Specifies the name or the CloudTrail ARN of the trail to be deleted. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :rtype: dict :return: {} """ pass def describe_trails(trailNameList=None, includeShadowTrails=None): """ Retrieves settings for the trail associated with the current region for your account. See also: AWS API Documentation :example: response = client.describe_trails( trailNameList=[ 'string', ], includeShadowTrails=True|False ) :type trailNameList: list :param trailNameList: Specifies a list of trail names, trail ARNs, or both, of the trails to describe. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail If an empty list is specified, information for the trail in the current region is returned. If an empty list is specified and IncludeShadowTrails is false, then information for all trails in the current region is returned. If an empty list is specified and IncludeShadowTrails is null or true, then information for all trails in the current region and any associated shadow trails in other regions is returned. Note If one or more trail names are specified, information is returned only if the names match the names of trails belonging only to the current region. To return information about a trail in another region, you must specify its trail ARN. (string) -- :type includeShadowTrails: boolean :param includeShadowTrails: Specifies whether to include shadow trails in the response. A shadow trail is the replication in a region of a trail that was created in a different region. The default is true. :rtype: dict :return: { 'trailList': [ { 'Name': 'string', 'S3BucketName': 'string', 'S3KeyPrefix': 'string', 'SnsTopicName': 'string', 'SnsTopicARN': 'string', 'IncludeGlobalServiceEvents': True|False, 'IsMultiRegionTrail': True|False, 'HomeRegion': 'string', 'TrailARN': 'string', 'LogFileValidationEnabled': True|False, 'CloudWatchLogsLogGroupArn': 'string', 'CloudWatchLogsRoleArn': 'string', 'KmsKeyId': 'string', 'HasCustomEventSelectors': True|False }, ] } """ pass def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None): """ Generate a presigned url given a client, its method, and arguments :type ClientMethod: string :param ClientMethod: The client method to presign for :type Params: dict :param Params: The parameters normally passed to ClientMethod. :type ExpiresIn: int :param ExpiresIn: The number of seconds the presigned url is valid for. By default it expires in an hour (3600 seconds) :type HttpMethod: string :param HttpMethod: The http method to use on the generated url. By default, the http method is whatever is used in the method's model. """ pass def get_event_selectors(TrailName=None): """ Describes the settings for the event selectors that you configured for your trail. The information returned for your event selectors includes the following: For more information, see Logging Data and Management Events for Trails in the AWS CloudTrail User Guide . See also: AWS API Documentation :example: response = client.get_event_selectors( TrailName='string' ) :type TrailName: string :param TrailName: [REQUIRED] Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid. Not be in IP address format (for example, 192.168.5.4) If you specify a trail ARN, it must be in the format: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :rtype: dict :return: { 'TrailARN': 'string', 'EventSelectors': [ { 'ReadWriteType': 'ReadOnly'|'WriteOnly'|'All', 'IncludeManagementEvents': True|False, 'DataResources': [ { 'Type': 'string', 'Values': [ 'string', ] }, ] }, ] } :returns: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid. Not be in IP address format (for example, 192.168.5.4) """ pass def get_paginator(operation_name=None): """ Create a paginator for an operation. :type operation_name: string :param operation_name: The operation name. This is the same name as the method name on the client. For example, if the method name is create_foo, and you'd normally invoke the operation as client.create_foo(**kwargs), if the create_foo operation can be paginated, you can use the call client.get_paginator('create_foo'). :rtype: L{botocore.paginate.Paginator} """ pass def get_trail_status(Name=None): """ Returns a JSON-formatted list of information about the specified trail. Fields include information on delivery errors, Amazon SNS and Amazon S3 errors, and start and stop logging times for each trail. This operation returns trail status from a single region. To return trail status from all regions, you must call the operation on each region. See also: AWS API Documentation :example: response = client.get_trail_status( Name='string' ) :type Name: string :param Name: [REQUIRED] Specifies the name or the CloudTrail ARN of the trail for which you are requesting status. To get the status of a shadow trail (a replication of the trail in another region), you must specify its ARN. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :rtype: dict :return: { 'IsLogging': True|False, 'LatestDeliveryError': 'string', 'LatestNotificationError': 'string', 'LatestDeliveryTime': datetime(2015, 1, 1), 'LatestNotificationTime': datetime(2015, 1, 1), 'StartLoggingTime': datetime(2015, 1, 1), 'StopLoggingTime': datetime(2015, 1, 1), 'LatestCloudWatchLogsDeliveryError': 'string', 'LatestCloudWatchLogsDeliveryTime': datetime(2015, 1, 1), 'LatestDigestDeliveryTime': datetime(2015, 1, 1), 'LatestDigestDeliveryError': 'string', 'LatestDeliveryAttemptTime': 'string', 'LatestNotificationAttemptTime': 'string', 'LatestNotificationAttemptSucceeded': 'string', 'LatestDeliveryAttemptSucceeded': 'string', 'TimeLoggingStarted': 'string', 'TimeLoggingStopped': 'string' } """ pass def get_waiter(): """ """ pass def list_public_keys(StartTime=None, EndTime=None, NextToken=None): """ Returns all public keys whose private keys were used to sign the digest files within the specified time range. The public key is needed to validate digest files that were signed with its corresponding private key. See also: AWS API Documentation :example: response = client.list_public_keys( StartTime=datetime(2015, 1, 1), EndTime=datetime(2015, 1, 1), NextToken='string' ) :type StartTime: datetime :param StartTime: Optionally specifies, in UTC, the start of the time range to look up public keys for CloudTrail digest files. If not specified, the current time is used, and the current public key is returned. :type EndTime: datetime :param EndTime: Optionally specifies, in UTC, the end of the time range to look up public keys for CloudTrail digest files. If not specified, the current time is used. :type NextToken: string :param NextToken: Reserved for future use. :rtype: dict :return: { 'PublicKeyList': [ { 'Value': b'bytes', 'ValidityStartTime': datetime(2015, 1, 1), 'ValidityEndTime': datetime(2015, 1, 1), 'Fingerprint': 'string' }, ], 'NextToken': 'string' } """ pass def list_tags(ResourceIdList=None, NextToken=None): """ Lists the tags for the trail in the current region. See also: AWS API Documentation :example: response = client.list_tags( ResourceIdList=[ 'string', ], NextToken='string' ) :type ResourceIdList: list :param ResourceIdList: [REQUIRED] Specifies a list of trail ARNs whose tags will be listed. The list has a limit of 20 ARNs. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail (string) -- :type NextToken: string :param NextToken: Reserved for future use. :rtype: dict :return: { 'ResourceTagList': [ { 'ResourceId': 'string', 'TagsList': [ { 'Key': 'string', 'Value': 'string' }, ] }, ], 'NextToken': 'string' } """ pass def lookup_events(LookupAttributes=None, StartTime=None, EndTime=None, MaxResults=None, NextToken=None): """ Looks up API activity events captured by CloudTrail that create, update, or delete resources in your account. Events for a region can be looked up for the times in which you had CloudTrail turned on in that region during the last seven days. Lookup supports the following attributes: All attributes are optional. The default number of results returned is 10, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results. See also: AWS API Documentation :example: response = client.lookup_events( LookupAttributes=[ { 'AttributeKey': 'EventId'|'EventName'|'Username'|'ResourceType'|'ResourceName'|'EventSource', 'AttributeValue': 'string' }, ], StartTime=datetime(2015, 1, 1), EndTime=datetime(2015, 1, 1), MaxResults=123, NextToken='string' ) :type LookupAttributes: list :param LookupAttributes: Contains a list of lookup attributes. Currently the list can contain only one item. (dict) --Specifies an attribute and value that filter the events returned. AttributeKey (string) -- [REQUIRED]Specifies an attribute on which to filter the events returned. AttributeValue (string) -- [REQUIRED]Specifies a value for the specified AttributeKey. :type StartTime: datetime :param StartTime: Specifies that only events that occur after or at the specified time are returned. If the specified start time is after the specified end time, an error is returned. :type EndTime: datetime :param EndTime: Specifies that only events that occur before or at the specified time are returned. If the specified end time is before the specified start time, an error is returned. :type MaxResults: integer :param MaxResults: The number of events to return. Possible values are 1 through 50. The default is 10. :type NextToken: string :param NextToken: The token to use to get the next page of results after a previous API call. This token must be passed in with the same parameters that were specified in the the original call. For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters. :rtype: dict :return: { 'Events': [ { 'EventId': 'string', 'EventName': 'string', 'EventTime': datetime(2015, 1, 1), 'EventSource': 'string', 'Username': 'string', 'Resources': [ { 'ResourceType': 'string', 'ResourceName': 'string' }, ], 'CloudTrailEvent': 'string' }, ], 'NextToken': 'string' } :returns: LookupAttributes (list) -- Contains a list of lookup attributes. Currently the list can contain only one item. (dict) --Specifies an attribute and value that filter the events returned. AttributeKey (string) -- [REQUIRED]Specifies an attribute on which to filter the events returned. AttributeValue (string) -- [REQUIRED]Specifies a value for the specified AttributeKey. StartTime (datetime) -- Specifies that only events that occur after or at the specified time are returned. If the specified start time is after the specified end time, an error is returned. EndTime (datetime) -- Specifies that only events that occur before or at the specified time are returned. If the specified end time is before the specified start time, an error is returned. MaxResults (integer) -- The number of events to return. Possible values are 1 through 50. The default is 10. NextToken (string) -- The token to use to get the next page of results after a previous API call. This token must be passed in with the same parameters that were specified in the the original call. For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters. """ pass def put_event_selectors(TrailName=None, EventSelectors=None): """ Configures an event selector for your trail. Use event selectors to specify whether you want your trail to log management and/or data events. When an event occurs in your account, CloudTrail evaluates the event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. Example The PutEventSelectors operation must be called from the region in which the trail was created; otherwise, an InvalidHomeRegionException is thrown. You can configure up to five event selectors for each trail. For more information, see Logging Data and Management Events for Trails in the AWS CloudTrail User Guide . See also: AWS API Documentation :example: response = client.put_event_selectors( TrailName='string', EventSelectors=[ { 'ReadWriteType': 'ReadOnly'|'WriteOnly'|'All', 'IncludeManagementEvents': True|False, 'DataResources': [ { 'Type': 'string', 'Values': [ 'string', ] }, ] }, ] ) :type TrailName: string :param TrailName: [REQUIRED] Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid. Not be in IP address format (for example, 192.168.5.4) If you specify a trail ARN, it must be in the format: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :type EventSelectors: list :param EventSelectors: [REQUIRED] Specifies the settings for your event selectors. You can configure up to five event selectors for a trail. (dict) --Use event selectors to specify whether you want your trail to log management and/or data events. When an event occurs in your account, CloudTrail evaluates the event selector for all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. You can configure up to five event selectors for a trail. ReadWriteType (string) --Specify if you want your trail to log read-only events, write-only events, or all. For example, the EC2 GetConsoleOutput is a read-only API operation and RunInstances is a write-only API operation. By default, the value is All . IncludeManagementEvents (boolean) --Specify if you want your event selector to include management events for your trail. For more information, see Management Events in the AWS CloudTrail User Guide . By default, the value is true . DataResources (list) --CloudTrail supports logging only data events for S3 objects. You can specify up to 250 S3 buckets and object prefixes for a trail. For more information, see Data Events in the AWS CloudTrail User Guide . (dict) --The Amazon S3 objects that you specify in your event selectors for your trail to log data events. Data events are object-level API operations that access S3 objects, such as GetObject , DeleteObject , and PutObject . You can specify up to 250 S3 buckets and object prefixes for a trail. Example You create an event selector for a trail and specify an S3 bucket and an empty prefix, such as arn:aws:s3:::bucket-1/ . You upload an image file to bucket-1 . The PutObject API operation occurs on an object in the S3 bucket that you specified in the event selector. The trail processes and logs the event. You upload another image file to a different S3 bucket named arn:aws:s3:::bucket-2 . The event occurs on an object in an S3 bucket that you didn't specify in the event selector. The trail doesn t log the event. Type (string) --The resource type in which you want to log data events. You can specify only the following value: AWS::S3::Object . Values (list) --A list of ARN-like strings for the specified S3 objects. To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::bucket-1/ . The trail logs data events for all objects in this S3 bucket. To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::bucket-1/example-images . The trail logs data events for objects in this S3 bucket that match the prefix. (string) -- :rtype: dict :return: { 'TrailARN': 'string', 'EventSelectors': [ { 'ReadWriteType': 'ReadOnly'|'WriteOnly'|'All', 'IncludeManagementEvents': True|False, 'DataResources': [ { 'Type': 'string', 'Values': [ 'string', ] }, ] }, ] } :returns: TrailName (string) -- [REQUIRED] Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid. Not be in IP address format (for example, 192.168.5.4) If you specify a trail ARN, it must be in the format: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail EventSelectors (list) -- [REQUIRED] Specifies the settings for your event selectors. You can configure up to five event selectors for a trail. (dict) --Use event selectors to specify whether you want your trail to log management and/or data events. When an event occurs in your account, CloudTrail evaluates the event selector for all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. You can configure up to five event selectors for a trail. ReadWriteType (string) --Specify if you want your trail to log read-only events, write-only events, or all. For example, the EC2 GetConsoleOutput is a read-only API operation and RunInstances is a write-only API operation. By default, the value is All . IncludeManagementEvents (boolean) --Specify if you want your event selector to include management events for your trail. For more information, see Management Events in the AWS CloudTrail User Guide . By default, the value is true . DataResources (list) --CloudTrail supports logging only data events for S3 objects. You can specify up to 250 S3 buckets and object prefixes for a trail. For more information, see Data Events in the AWS CloudTrail User Guide . (dict) --The Amazon S3 objects that you specify in your event selectors for your trail to log data events. Data events are object-level API operations that access S3 objects, such as GetObject , DeleteObject , and PutObject . You can specify up to 250 S3 buckets and object prefixes for a trail. Example You create an event selector for a trail and specify an S3 bucket and an empty prefix, such as arn:aws:s3:::bucket-1/ . You upload an image file to bucket-1 . The PutObject API operation occurs on an object in the S3 bucket that you specified in the event selector. The trail processes and logs the event. You upload another image file to a different S3 bucket named arn:aws:s3:::bucket-2 . The event occurs on an object in an S3 bucket that you didn't specify in the event selector. The trail doesnt log the event. Type (string) --The resource type in which you want to log data events. You can specify only the following value: AWS::S3::Object . Values (list) --A list of ARN-like strings for the specified S3 objects. To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::bucket-1/ . The trail logs data events for all objects in this S3 bucket. To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::bucket-1/example-images . The trail logs data events for objects in this S3 bucket that match the prefix. (string) -- """ pass def remove_tags(ResourceId=None, TagsList=None): """ Removes the specified tags from a trail. See also: AWS API Documentation :example: response = client.remove_tags( ResourceId='string', TagsList=[ { 'Key': 'string', 'Value': 'string' }, ] ) :type ResourceId: string :param ResourceId: [REQUIRED] Specifies the ARN of the trail from which tags should be removed. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :type TagsList: list :param TagsList: Specifies a list of tags to be removed. (dict) --A custom key-value pair associated with a resource such as a CloudTrail trail. Key (string) -- [REQUIRED]The key in a key-value pair. The key must be must be no longer than 128 Unicode characters. The key must be unique for the resource to which it applies. Value (string) --The value in a key-value pair of a tag. The value must be no longer than 256 Unicode characters. :rtype: dict :return: {} """ pass def start_logging(Name=None): """ Starts the recording of AWS API calls and log file delivery for a trail. For a trail that is enabled in all regions, this operation must be called from the region in which the trail was created. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions. See also: AWS API Documentation :example: response = client.start_logging( Name='string' ) :type Name: string :param Name: [REQUIRED] Specifies the name or the CloudTrail ARN of the trail for which CloudTrail logs AWS API calls. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :rtype: dict :return: {} """ pass def stop_logging(Name=None): """ Suspends the recording of AWS API calls and log file delivery for the specified trail. Under most circumstances, there is no need to use this action. You can update a trail without stopping it first. This action is the only way to stop recording. For a trail enabled in all regions, this operation must be called from the region in which the trail was created, or an InvalidHomeRegionException will occur. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail enabled in all regions. See also: AWS API Documentation :example: response = client.stop_logging( Name='string' ) :type Name: string :param Name: [REQUIRED] Specifies the name or the CloudTrail ARN of the trail for which CloudTrail will stop logging AWS API calls. The format of a trail ARN is: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :rtype: dict :return: {} """ pass def update_trail(Name=None, S3BucketName=None, S3KeyPrefix=None, SnsTopicName=None, IncludeGlobalServiceEvents=None, IsMultiRegionTrail=None, EnableLogFileValidation=None, CloudWatchLogsLogGroupArn=None, CloudWatchLogsRoleArn=None, KmsKeyId=None): """ Updates the settings that specify delivery of log files. Changes to a trail do not require stopping the CloudTrail service. Use this action to designate an existing bucket for log delivery. If the existing bucket has previously been a target for CloudTrail log files, an IAM policy exists for the bucket. UpdateTrail must be called from the region in which the trail was created; otherwise, an InvalidHomeRegionException is thrown. See also: AWS API Documentation :example: response = client.update_trail( Name='string', S3BucketName='string', S3KeyPrefix='string', SnsTopicName='string', IncludeGlobalServiceEvents=True|False, IsMultiRegionTrail=True|False, EnableLogFileValidation=True|False, CloudWatchLogsLogGroupArn='string', CloudWatchLogsRoleArn='string', KmsKeyId='string' ) :type Name: string :param Name: [REQUIRED] Specifies the name of the trail or trail ARN. If Name is a trail name, the string must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid. Not be in IP address format (for example, 192.168.5.4) If Name is a trail ARN, it must be in the format: arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail :type S3BucketName: string :param S3BucketName: Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements . :type S3KeyPrefix: string :param S3KeyPrefix: Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files . The maximum length is 200 characters. :type SnsTopicName: string :param SnsTopicName: Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters. :type IncludeGlobalServiceEvents: boolean :param IncludeGlobalServiceEvents: Specifies whether the trail is publishing events from global services such as IAM to the log files. :type IsMultiRegionTrail: boolean :param IsMultiRegionTrail: Specifies whether the trail applies only to the current region or to all regions. The default is false. If the trail exists only in the current region and this value is set to true, shadow trails (replications of the trail) will be created in the other regions. If the trail exists in all regions and this value is set to false, the trail will remain in the region where it was created, and its shadow trails in other regions will be deleted. :type EnableLogFileValidation: boolean :param EnableLogFileValidation: Specifies whether log file validation is enabled. The default is false. Note When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail will not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on January 10. The same applies whenever you stop CloudTrail logging or delete a trail. :type CloudWatchLogsLogGroupArn: string :param CloudWatchLogsLogGroupArn: Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn. :type CloudWatchLogsRoleArn: string :param CloudWatchLogsRoleArn: Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group. :type KmsKeyId: string :param KmsKeyId: Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The value can be an alias name prefixed by 'alias/', a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier. Examples: alias/MyAliasName arn:aws:kms:us-east-1:123456789012:alias/MyAliasName arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 12345678-1234-1234-1234-123456789012 :rtype: dict :return: { 'Name': 'string', 'S3BucketName': 'string', 'S3KeyPrefix': 'string', 'SnsTopicName': 'string', 'SnsTopicARN': 'string', 'IncludeGlobalServiceEvents': True|False, 'IsMultiRegionTrail': True|False, 'TrailARN': 'string', 'LogFileValidationEnabled': True|False, 'CloudWatchLogsLogGroupArn': 'string', 'CloudWatchLogsRoleArn': 'string', 'KmsKeyId': 'string' } """ pass
#!/usr/bin/python # encoding=utf-8 """ @Author : Don @Date : 2020/12/25 9:53 @Desc : """
""" @Author : Don @Date : 2020/12/25 9:53 @Desc : """
for i in range(1, 10+1): if i%3==0: print(i)
for i in range(1, 10 + 1): if i % 3 == 0: print(i)
N = int(input()) m = 1000000007 dp = [0] * 10 dp[0] = 1 for _ in range(N): for i in range(8, -1, -1): for j in range(i + 1, 10): dp[j] += dp[i] dp[j] %= m print(sum(dp) % m)
n = int(input()) m = 1000000007 dp = [0] * 10 dp[0] = 1 for _ in range(N): for i in range(8, -1, -1): for j in range(i + 1, 10): dp[j] += dp[i] dp[j] %= m print(sum(dp) % m)
# # PySNMP MIB module HH3C-DLDP2-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HH3C-DLDP2-MIB # Produced by pysmi-0.3.4 at Wed May 1 13:26:06 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint") hh3cCommon, = mibBuilder.importSymbols("HH3C-OID-MIB", "hh3cCommon") ifIndex, ifDescr = mibBuilder.importSymbols("IF-MIB", "ifIndex", "ifDescr") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") Unsigned32, IpAddress, ObjectIdentity, Bits, ModuleIdentity, Counter32, Integer32, Gauge32, MibIdentifier, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, iso, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "IpAddress", "ObjectIdentity", "Bits", "ModuleIdentity", "Counter32", "Integer32", "Gauge32", "MibIdentifier", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "iso", "TimeTicks") TruthValue, DisplayString, MacAddress, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "MacAddress", "TextualConvention") hh3cDldp2 = ModuleIdentity((1, 3, 6, 1, 4, 1, 25506, 2, 117)) hh3cDldp2.setRevisions(('2011-12-26 15:30',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: hh3cDldp2.setRevisionsDescriptions(('Initial version of this MIB module.',)) if mibBuilder.loadTexts: hh3cDldp2.setLastUpdated('201112261530Z') if mibBuilder.loadTexts: hh3cDldp2.setOrganization('Hangzhou H3C Technologies. Co., Ltd.') if mibBuilder.loadTexts: hh3cDldp2.setContactInfo('Platform Team Hangzhou H3C Technologies. Co., Ltd. Hai-Dian District Beijing P.R. China http://www.h3c.com Zip: 100085') if mibBuilder.loadTexts: hh3cDldp2.setDescription('Device Link Detection Protocol (DLDP) MIB. Device Link Detection Protocol is a private Layer 2 protocol, which can be used to detect and shut down unidirectional links (fiber or copper links) to avoid network problems.') hh3cDldp2ScalarGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 117, 1)) hh3cDldp2GlobalEnable = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 117, 1, 1), TruthValue()).setMaxAccess("readwrite") if mibBuilder.loadTexts: hh3cDldp2GlobalEnable.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2GlobalEnable.setDescription('Enable(true) or disable(false) DLDP on the device.') hh3cDldp2Interval = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 117, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100)).clone(5)).setUnits('second').setMaxAccess("readwrite") if mibBuilder.loadTexts: hh3cDldp2Interval.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2Interval.setDescription('Indicates the advertisement packet sending interval.') hh3cDldp2AuthMode = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 117, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("none", 2), ("simple", 3), ("md5", 4))).clone('none')).setMaxAccess("readwrite") if mibBuilder.loadTexts: hh3cDldp2AuthMode.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2AuthMode.setDescription('Indicates the authentication mode. unknown: cannot be determined for some reason. none: not authenticated. simple: authenticated by a clear text password. md5: authenticated by MD5 digest.') hh3cDldp2AuthPassword = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 117, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readwrite") if mibBuilder.loadTexts: hh3cDldp2AuthPassword.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2AuthPassword.setDescription('Indicates the authentication password. Setting the password to a zero-length octet string means deleting the password. When read, it always returns a zero-length octet string.') hh3cDldp2UniShutdown = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 117, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("auto", 2), ("manual", 3))).clone('auto')).setMaxAccess("readwrite") if mibBuilder.loadTexts: hh3cDldp2UniShutdown.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2UniShutdown.setDescription('Indicates the shutdown mode when a unidirectional link has been detected. unknown: cannot be determined for some reason. auto: the port will be shutdown automatically. manual: the port must be shut down manually.') hh3cDldp2TableGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2)) hh3cDldp2PortConfigTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 1), ) if mibBuilder.loadTexts: hh3cDldp2PortConfigTable.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortConfigTable.setDescription('This table contains all ports that support DLDP.') hh3cDldp2PortConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex")) if mibBuilder.loadTexts: hh3cDldp2PortConfigEntry.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortConfigEntry.setDescription('This entry describes a port that supports DLDP.') hh3cDldp2PortEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 1, 1, 1), TruthValue()).setMaxAccess("readwrite") if mibBuilder.loadTexts: hh3cDldp2PortEnable.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortEnable.setDescription('Enable(true) or disable(false) DLDP on a port.') hh3cDldp2PortStatusTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 2), ) if mibBuilder.loadTexts: hh3cDldp2PortStatusTable.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortStatusTable.setDescription('This table contains all ports enabled with DLDP.') hh3cDldp2PortStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex")) if mibBuilder.loadTexts: hh3cDldp2PortStatusEntry.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortStatusEntry.setDescription('This entry describes a port enabled with DLDP.') hh3cDldp2PortOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("initial", 2), ("inactive", 3), ("unidirectional", 4), ("bidirectional", 5)))).setMaxAccess("readonly") if mibBuilder.loadTexts: hh3cDldp2PortOperStatus.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortOperStatus.setDescription("Indicates the DLDP operating status on the port. unknown: cannot be determined for some reason. initial: DLDP is not globally enabled. inactive: physical status of the port is down. unidirectional: all neighbors of the port are in 'unconfirmed' status. bidirectional: more than one neighbor of the port is in 'confirmed' status.") hh3cDldp2PortLinkStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("down", 2), ("up", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: hh3cDldp2PortLinkStatus.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortLinkStatus.setDescription("Indicates the DLDP link status of the port. unknown: cannot be determined for some reason. down: the DLDP link status of the port is down. up: the DLDP link status of the port is up. If the port operating status is not 'inactive', 'unidirectional', or 'bidirectional', it always returns 'unknown'.") hh3cDldp2NeighborTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 3), ) if mibBuilder.loadTexts: hh3cDldp2NeighborTable.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2NeighborTable.setDescription("This table contains all port's neighbors.") hh3cDldp2NeighborEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HH3C-DLDP2-MIB", "hh3cDldp2NeighborBridgeMac"), (0, "HH3C-DLDP2-MIB", "hh3cDldp2NeighborPortIndex")) if mibBuilder.loadTexts: hh3cDldp2NeighborEntry.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2NeighborEntry.setDescription("This entry describes a port's neighbors.") hh3cDldp2NeighborBridgeMac = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 3, 1, 1), MacAddress()) if mibBuilder.loadTexts: hh3cDldp2NeighborBridgeMac.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2NeighborBridgeMac.setDescription('Indicates the bridge MAC address of a neighbor.') hh3cDldp2NeighborPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))) if mibBuilder.loadTexts: hh3cDldp2NeighborPortIndex.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2NeighborPortIndex.setDescription('Indicates the port index of a neighbor.') hh3cDldp2NeighborStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("unconfirmed", 2), ("confirmed", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: hh3cDldp2NeighborStatus.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2NeighborStatus.setDescription('Indicates the status of a neighbor. unknown: cannot be determined for some reason. unconfirmed: unidirectional communication between the port and its neighbor. confirmed: bidirectional communication between the port and its neighbor.') hh3cDldp2NeighborAgingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 3, 1, 4), Integer32()).setUnits('second').setMaxAccess("readonly") if mibBuilder.loadTexts: hh3cDldp2NeighborAgingTime.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2NeighborAgingTime.setDescription("Indicates the aging time of a neighbor. If the neighbor status is not 'confirmed', it always returns 0.") hh3cDldp2TrapBindObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 117, 3)) hh3cDldp2Trap = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 117, 4)) hh3cDldp2TrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 117, 4, 0)) hh3cDldp2TrapUniLink = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 117, 4, 0, 1)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr")) if mibBuilder.loadTexts: hh3cDldp2TrapUniLink.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2TrapUniLink.setDescription('This trap is generated when DLDP detects a unidirectional link, ifIndex and ifDescr identify the port.') hh3cDldp2TrapBidLink = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 117, 4, 0, 2)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr")) if mibBuilder.loadTexts: hh3cDldp2TrapBidLink.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2TrapBidLink.setDescription('This trap is generated when DLDP detects a bidirectional link, ifIndex and ifDescr identify the port.') mibBuilder.exportSymbols("HH3C-DLDP2-MIB", hh3cDldp2PortOperStatus=hh3cDldp2PortOperStatus, hh3cDldp2UniShutdown=hh3cDldp2UniShutdown, hh3cDldp2NeighborStatus=hh3cDldp2NeighborStatus, hh3cDldp2TrapBidLink=hh3cDldp2TrapBidLink, hh3cDldp2PortConfigTable=hh3cDldp2PortConfigTable, hh3cDldp2TrapPrefix=hh3cDldp2TrapPrefix, hh3cDldp2PortEnable=hh3cDldp2PortEnable, hh3cDldp2GlobalEnable=hh3cDldp2GlobalEnable, hh3cDldp2NeighborAgingTime=hh3cDldp2NeighborAgingTime, hh3cDldp2PortStatusEntry=hh3cDldp2PortStatusEntry, PYSNMP_MODULE_ID=hh3cDldp2, hh3cDldp2NeighborEntry=hh3cDldp2NeighborEntry, hh3cDldp2=hh3cDldp2, hh3cDldp2PortConfigEntry=hh3cDldp2PortConfigEntry, hh3cDldp2NeighborTable=hh3cDldp2NeighborTable, hh3cDldp2NeighborPortIndex=hh3cDldp2NeighborPortIndex, hh3cDldp2Trap=hh3cDldp2Trap, hh3cDldp2PortLinkStatus=hh3cDldp2PortLinkStatus, hh3cDldp2NeighborBridgeMac=hh3cDldp2NeighborBridgeMac, hh3cDldp2TrapBindObjects=hh3cDldp2TrapBindObjects, hh3cDldp2AuthMode=hh3cDldp2AuthMode, hh3cDldp2PortStatusTable=hh3cDldp2PortStatusTable, hh3cDldp2ScalarGroup=hh3cDldp2ScalarGroup, hh3cDldp2AuthPassword=hh3cDldp2AuthPassword, hh3cDldp2Interval=hh3cDldp2Interval, hh3cDldp2TableGroup=hh3cDldp2TableGroup, hh3cDldp2TrapUniLink=hh3cDldp2TrapUniLink)
(octet_string, object_identifier, integer) = mibBuilder.importSymbols('ASN1', 'OctetString', 'ObjectIdentifier', 'Integer') (named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues') (constraints_intersection, single_value_constraint, value_range_constraint, constraints_union, value_size_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ConstraintsIntersection', 'SingleValueConstraint', 'ValueRangeConstraint', 'ConstraintsUnion', 'ValueSizeConstraint') (hh3c_common,) = mibBuilder.importSymbols('HH3C-OID-MIB', 'hh3cCommon') (if_index, if_descr) = mibBuilder.importSymbols('IF-MIB', 'ifIndex', 'ifDescr') (module_compliance, notification_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'NotificationGroup') (unsigned32, ip_address, object_identity, bits, module_identity, counter32, integer32, gauge32, mib_identifier, notification_type, mib_scalar, mib_table, mib_table_row, mib_table_column, counter64, iso, time_ticks) = mibBuilder.importSymbols('SNMPv2-SMI', 'Unsigned32', 'IpAddress', 'ObjectIdentity', 'Bits', 'ModuleIdentity', 'Counter32', 'Integer32', 'Gauge32', 'MibIdentifier', 'NotificationType', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'Counter64', 'iso', 'TimeTicks') (truth_value, display_string, mac_address, textual_convention) = mibBuilder.importSymbols('SNMPv2-TC', 'TruthValue', 'DisplayString', 'MacAddress', 'TextualConvention') hh3c_dldp2 = module_identity((1, 3, 6, 1, 4, 1, 25506, 2, 117)) hh3cDldp2.setRevisions(('2011-12-26 15:30',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: hh3cDldp2.setRevisionsDescriptions(('Initial version of this MIB module.',)) if mibBuilder.loadTexts: hh3cDldp2.setLastUpdated('201112261530Z') if mibBuilder.loadTexts: hh3cDldp2.setOrganization('Hangzhou H3C Technologies. Co., Ltd.') if mibBuilder.loadTexts: hh3cDldp2.setContactInfo('Platform Team Hangzhou H3C Technologies. Co., Ltd. Hai-Dian District Beijing P.R. China http://www.h3c.com Zip: 100085') if mibBuilder.loadTexts: hh3cDldp2.setDescription('Device Link Detection Protocol (DLDP) MIB. Device Link Detection Protocol is a private Layer 2 protocol, which can be used to detect and shut down unidirectional links (fiber or copper links) to avoid network problems.') hh3c_dldp2_scalar_group = mib_identifier((1, 3, 6, 1, 4, 1, 25506, 2, 117, 1)) hh3c_dldp2_global_enable = mib_scalar((1, 3, 6, 1, 4, 1, 25506, 2, 117, 1, 1), truth_value()).setMaxAccess('readwrite') if mibBuilder.loadTexts: hh3cDldp2GlobalEnable.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2GlobalEnable.setDescription('Enable(true) or disable(false) DLDP on the device.') hh3c_dldp2_interval = mib_scalar((1, 3, 6, 1, 4, 1, 25506, 2, 117, 1, 2), integer32().subtype(subtypeSpec=value_range_constraint(1, 100)).clone(5)).setUnits('second').setMaxAccess('readwrite') if mibBuilder.loadTexts: hh3cDldp2Interval.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2Interval.setDescription('Indicates the advertisement packet sending interval.') hh3c_dldp2_auth_mode = mib_scalar((1, 3, 6, 1, 4, 1, 25506, 2, 117, 1, 3), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3, 4))).clone(namedValues=named_values(('unknown', 1), ('none', 2), ('simple', 3), ('md5', 4))).clone('none')).setMaxAccess('readwrite') if mibBuilder.loadTexts: hh3cDldp2AuthMode.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2AuthMode.setDescription('Indicates the authentication mode. unknown: cannot be determined for some reason. none: not authenticated. simple: authenticated by a clear text password. md5: authenticated by MD5 digest.') hh3c_dldp2_auth_password = mib_scalar((1, 3, 6, 1, 4, 1, 25506, 2, 117, 1, 4), octet_string().subtype(subtypeSpec=value_size_constraint(0, 16))).setMaxAccess('readwrite') if mibBuilder.loadTexts: hh3cDldp2AuthPassword.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2AuthPassword.setDescription('Indicates the authentication password. Setting the password to a zero-length octet string means deleting the password. When read, it always returns a zero-length octet string.') hh3c_dldp2_uni_shutdown = mib_scalar((1, 3, 6, 1, 4, 1, 25506, 2, 117, 1, 5), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3))).clone(namedValues=named_values(('unknown', 1), ('auto', 2), ('manual', 3))).clone('auto')).setMaxAccess('readwrite') if mibBuilder.loadTexts: hh3cDldp2UniShutdown.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2UniShutdown.setDescription('Indicates the shutdown mode when a unidirectional link has been detected. unknown: cannot be determined for some reason. auto: the port will be shutdown automatically. manual: the port must be shut down manually.') hh3c_dldp2_table_group = mib_identifier((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2)) hh3c_dldp2_port_config_table = mib_table((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 1)) if mibBuilder.loadTexts: hh3cDldp2PortConfigTable.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortConfigTable.setDescription('This table contains all ports that support DLDP.') hh3c_dldp2_port_config_entry = mib_table_row((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 1, 1)).setIndexNames((0, 'IF-MIB', 'ifIndex')) if mibBuilder.loadTexts: hh3cDldp2PortConfigEntry.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortConfigEntry.setDescription('This entry describes a port that supports DLDP.') hh3c_dldp2_port_enable = mib_table_column((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 1, 1, 1), truth_value()).setMaxAccess('readwrite') if mibBuilder.loadTexts: hh3cDldp2PortEnable.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortEnable.setDescription('Enable(true) or disable(false) DLDP on a port.') hh3c_dldp2_port_status_table = mib_table((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 2)) if mibBuilder.loadTexts: hh3cDldp2PortStatusTable.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortStatusTable.setDescription('This table contains all ports enabled with DLDP.') hh3c_dldp2_port_status_entry = mib_table_row((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 2, 1)).setIndexNames((0, 'IF-MIB', 'ifIndex')) if mibBuilder.loadTexts: hh3cDldp2PortStatusEntry.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortStatusEntry.setDescription('This entry describes a port enabled with DLDP.') hh3c_dldp2_port_oper_status = mib_table_column((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 2, 1, 1), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3, 4, 5))).clone(namedValues=named_values(('unknown', 1), ('initial', 2), ('inactive', 3), ('unidirectional', 4), ('bidirectional', 5)))).setMaxAccess('readonly') if mibBuilder.loadTexts: hh3cDldp2PortOperStatus.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortOperStatus.setDescription("Indicates the DLDP operating status on the port. unknown: cannot be determined for some reason. initial: DLDP is not globally enabled. inactive: physical status of the port is down. unidirectional: all neighbors of the port are in 'unconfirmed' status. bidirectional: more than one neighbor of the port is in 'confirmed' status.") hh3c_dldp2_port_link_status = mib_table_column((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 2, 1, 2), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3))).clone(namedValues=named_values(('unknown', 1), ('down', 2), ('up', 3)))).setMaxAccess('readonly') if mibBuilder.loadTexts: hh3cDldp2PortLinkStatus.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2PortLinkStatus.setDescription("Indicates the DLDP link status of the port. unknown: cannot be determined for some reason. down: the DLDP link status of the port is down. up: the DLDP link status of the port is up. If the port operating status is not 'inactive', 'unidirectional', or 'bidirectional', it always returns 'unknown'.") hh3c_dldp2_neighbor_table = mib_table((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 3)) if mibBuilder.loadTexts: hh3cDldp2NeighborTable.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2NeighborTable.setDescription("This table contains all port's neighbors.") hh3c_dldp2_neighbor_entry = mib_table_row((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 3, 1)).setIndexNames((0, 'IF-MIB', 'ifIndex'), (0, 'HH3C-DLDP2-MIB', 'hh3cDldp2NeighborBridgeMac'), (0, 'HH3C-DLDP2-MIB', 'hh3cDldp2NeighborPortIndex')) if mibBuilder.loadTexts: hh3cDldp2NeighborEntry.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2NeighborEntry.setDescription("This entry describes a port's neighbors.") hh3c_dldp2_neighbor_bridge_mac = mib_table_column((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 3, 1, 1), mac_address()) if mibBuilder.loadTexts: hh3cDldp2NeighborBridgeMac.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2NeighborBridgeMac.setDescription('Indicates the bridge MAC address of a neighbor.') hh3c_dldp2_neighbor_port_index = mib_table_column((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 3, 1, 2), integer32().subtype(subtypeSpec=value_range_constraint(1, 2147483647))) if mibBuilder.loadTexts: hh3cDldp2NeighborPortIndex.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2NeighborPortIndex.setDescription('Indicates the port index of a neighbor.') hh3c_dldp2_neighbor_status = mib_table_column((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 3, 1, 3), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3))).clone(namedValues=named_values(('unknown', 1), ('unconfirmed', 2), ('confirmed', 3)))).setMaxAccess('readonly') if mibBuilder.loadTexts: hh3cDldp2NeighborStatus.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2NeighborStatus.setDescription('Indicates the status of a neighbor. unknown: cannot be determined for some reason. unconfirmed: unidirectional communication between the port and its neighbor. confirmed: bidirectional communication between the port and its neighbor.') hh3c_dldp2_neighbor_aging_time = mib_table_column((1, 3, 6, 1, 4, 1, 25506, 2, 117, 2, 3, 1, 4), integer32()).setUnits('second').setMaxAccess('readonly') if mibBuilder.loadTexts: hh3cDldp2NeighborAgingTime.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2NeighborAgingTime.setDescription("Indicates the aging time of a neighbor. If the neighbor status is not 'confirmed', it always returns 0.") hh3c_dldp2_trap_bind_objects = mib_identifier((1, 3, 6, 1, 4, 1, 25506, 2, 117, 3)) hh3c_dldp2_trap = mib_identifier((1, 3, 6, 1, 4, 1, 25506, 2, 117, 4)) hh3c_dldp2_trap_prefix = mib_identifier((1, 3, 6, 1, 4, 1, 25506, 2, 117, 4, 0)) hh3c_dldp2_trap_uni_link = notification_type((1, 3, 6, 1, 4, 1, 25506, 2, 117, 4, 0, 1)).setObjects(('IF-MIB', 'ifIndex'), ('IF-MIB', 'ifDescr')) if mibBuilder.loadTexts: hh3cDldp2TrapUniLink.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2TrapUniLink.setDescription('This trap is generated when DLDP detects a unidirectional link, ifIndex and ifDescr identify the port.') hh3c_dldp2_trap_bid_link = notification_type((1, 3, 6, 1, 4, 1, 25506, 2, 117, 4, 0, 2)).setObjects(('IF-MIB', 'ifIndex'), ('IF-MIB', 'ifDescr')) if mibBuilder.loadTexts: hh3cDldp2TrapBidLink.setStatus('current') if mibBuilder.loadTexts: hh3cDldp2TrapBidLink.setDescription('This trap is generated when DLDP detects a bidirectional link, ifIndex and ifDescr identify the port.') mibBuilder.exportSymbols('HH3C-DLDP2-MIB', hh3cDldp2PortOperStatus=hh3cDldp2PortOperStatus, hh3cDldp2UniShutdown=hh3cDldp2UniShutdown, hh3cDldp2NeighborStatus=hh3cDldp2NeighborStatus, hh3cDldp2TrapBidLink=hh3cDldp2TrapBidLink, hh3cDldp2PortConfigTable=hh3cDldp2PortConfigTable, hh3cDldp2TrapPrefix=hh3cDldp2TrapPrefix, hh3cDldp2PortEnable=hh3cDldp2PortEnable, hh3cDldp2GlobalEnable=hh3cDldp2GlobalEnable, hh3cDldp2NeighborAgingTime=hh3cDldp2NeighborAgingTime, hh3cDldp2PortStatusEntry=hh3cDldp2PortStatusEntry, PYSNMP_MODULE_ID=hh3cDldp2, hh3cDldp2NeighborEntry=hh3cDldp2NeighborEntry, hh3cDldp2=hh3cDldp2, hh3cDldp2PortConfigEntry=hh3cDldp2PortConfigEntry, hh3cDldp2NeighborTable=hh3cDldp2NeighborTable, hh3cDldp2NeighborPortIndex=hh3cDldp2NeighborPortIndex, hh3cDldp2Trap=hh3cDldp2Trap, hh3cDldp2PortLinkStatus=hh3cDldp2PortLinkStatus, hh3cDldp2NeighborBridgeMac=hh3cDldp2NeighborBridgeMac, hh3cDldp2TrapBindObjects=hh3cDldp2TrapBindObjects, hh3cDldp2AuthMode=hh3cDldp2AuthMode, hh3cDldp2PortStatusTable=hh3cDldp2PortStatusTable, hh3cDldp2ScalarGroup=hh3cDldp2ScalarGroup, hh3cDldp2AuthPassword=hh3cDldp2AuthPassword, hh3cDldp2Interval=hh3cDldp2Interval, hh3cDldp2TableGroup=hh3cDldp2TableGroup, hh3cDldp2TrapUniLink=hh3cDldp2TrapUniLink)