query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Initialize this game state and set the current player based on is_p1_turn.
def __init__(self, is_p1_turn: bool, side_length: int) -> None:
    """Initialize this game state and set the current player based on is_p1_turn."""
    super().__init__(is_p1_turn)
    self.side_length = side_length
    # Node labels: 26 uppercase then 26 lowercase letters (52 total) --
    # enough for any board up to side length 5, so no overflow handling.
    labels = list(string.ascii_uppercase) + list(string.ascii_lowercase)
    # Every ley-line starts out unclaimed, marked "@".
    self.hori_result = ["@" for _ in range(side_length + 1)]
    self.left_result = ["@" for _ in range(side_length + 1)]
    self.right_result = ["@" for _ in range(side_length + 1)]
    # Horizontal ley-lines: rows of length 2, 3, ..., side_length + 1,
    # followed by one final short row of length side_length.
    self.hori_lst = []
    start = 0
    for row_len in range(2, side_length + 2):
        self.hori_lst.append(labels[start:start + row_len])
        start += row_len
    self.hori_lst.append(labels[start:start + side_length])
    # Diagonal ley-lines are derived from the horizontal rows.
    full_rows = self.hori_lst[:-1]
    # Left-leaning ley-lines: column i of every row tall enough to have one,
    # then the last (short) row contributes one cell to lines 1..side_length.
    self.left_lst = []
    for i in range(side_length + 1):
        self.left_lst.append([row[i] for row in full_rows if len(row) > i])
    for i in range(1, side_length + 1):
        self.left_lst[i].append(self.hori_lst[-1][i - 1])
    # Right-leaning ley-lines: columns -1, -2, ... of every full row,
    # built back-to-front and then reversed into display order.
    self.right_lst = []
    for i in range(-1, -(side_length + 2), -1):
        self.right_lst.append([row[i] for row in full_rows if len(row) >= -i])
    self.right_lst.reverse()
    for i in range(side_length):
        self.right_lst[i].append(self.hori_lst[-1][i])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_first_player(self):\n if self.player2.won_previous:\n self.current_player = self.player2\n else: self.current_player = self.player1", "def __init__(self, is_p1_turn, current_total):\n self.p1_turn = is_p1_turn\n self.current_total = current_total", "def init_turn(...
[ "0.69196415", "0.69193554", "0.65942466", "0.65815157", "0.65624744", "0.6533628", "0.6411285", "0.6408902", "0.63886917", "0.63440305", "0.62498075", "0.6223506", "0.6214494", "0.6212468", "0.6181791", "0.61318034", "0.61269957", "0.6113165", "0.6105516", "0.60997057", "0.60...
0.0
-1
Return a string representation of the current state of the game.
def __str__(self) -> str:
    """Return a string representation of the current state of the game.

    Renders the hexagonal board as ASCII art.  Even iterations of the main
    loop emit a row of cell labels together with the ley-line markers that
    cap it; odd iterations emit the connecting '/' and '\\' edge rows.
    """
    # Local aliases for the board data (read-only in this method).
    side = self.side_length
    hori_lst = self.hori_lst
    hori_result = self.hori_result
    left_lst = self.left_lst
    left_result = self.left_result
    right_lst = self.right_lst
    right_result = self.right_result
    total_line=''
    # 2 * side + 5 text rows: label rows interleaved with edge rows.
    for i in range(2 * side + 5):
        # empty line string
        line = ''
        if i % 2 == 0:
            # Even text row: cell labels plus ley-line result markers.
            lineindex = int(i / 2)
            if lineindex <= side:
                # get the first 2 left results
                if lineindex == 0:
                    # Top row shows only the first two left ley-line markers.
                    for ia in range(3*(side+1)):
                        line += ' '
                    line += left_result[0]
                    line += ' '
                    line += left_result[1]
                # general case of combining the results and list together
                else:
                    if lineindex == side:
                        line += ' '
                    for ib in range(side - lineindex):
                        line += ' '
                    # Horizontal ley-line marker, then its row of cells.
                    line += hori_result[lineindex - 1]
                    for ic in range(len(hori_lst[lineindex - 1])):
                        line += ' - '
                        line += hori_lst[lineindex - 1][ic]
                    # Every row but the widest ends with a left ley-line marker.
                    if lineindex != side:
                        line += ' '
                        line += left_result[lineindex + 1]
            else:
                if lineindex == side + 1:
                    # Bottom cell row: last horizontal ley-line and its cells,
                    # capped by the last right ley-line marker.
                    line += ' '
                    line += hori_result[side]
                    for ie in range(side):
                        line += ' - '
                        line += hori_lst[side][ie]
                    line += ' '
                    line += right_result[side]
                else:
                    # Final row: all remaining right ley-line result markers.
                    for ig in range(9):
                        line += ' '
                    for ih in range(side):
                        line += right_result[ih]
                        line += ' '
            total_line += line + '\n'
        else:
            # Odd text row: the '/' and '\' edges connecting label rows.
            lineindex2 = int(i / 2)
            if lineindex2 == 0:
                for iA in range(3*side+1):
                    line += ' '
                line += ' / '
                line += ' '
                line += " / "
            elif lineindex2 < side:
                # Widening section of the hexagon.
                for iA in range(3 * (1 + side - lineindex2)):
                    line += ' '
                for iB in range(lineindex2 + 1):
                    line += '/ \\ '
                line += '/'
            elif lineindex2 == side:
                # Narrowing edge row just above the short bottom row.
                line += ' '
                for iD in range(side):
                    line += '\\ / '
                line += '\\'
            elif lineindex2 == side + 1:
                # Trailing edges pointing to the right ley-line markers.
                for iE in range(8):
                    line += ' '
                for iG in range(side):
                    line += '\\ '
            total_line += line + '\n'
    return total_line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stateString(self):\n return self._mdp.stateString(self._cur_state);", "def __str__(self):\n s=\"\"\n for y in range(0,HEIGHT):\n for x in range(0,WIDTH):\n s+=str(self.gameState[x,y])\n return s", "def __str__(self):\n s = \"\"\n for r in ...
[ "0.83233804", "0.81567377", "0.8026721", "0.7843948", "0.78436744", "0.7827769", "0.775942", "0.77437556", "0.76473355", "0.7591848", "0.7476206", "0.7409211", "0.7405296", "0.7345735", "0.7345735", "0.7345735", "0.7345735", "0.7345735", "0.7345735", "0.7345735", "0.7345735",...
0.0
-1
Return all possible moves that can be applied to this state.
def get_possible_moves(self) -> list:
    """Return all possible moves that can be applied to this state.

    A move is any node label still on the board.  Claimed cells are
    overwritten with "1" or "2" by make_move, so a cell is playable
    exactly when its label is still alphabetic.
    """
    # Dead commented-out variant (filtering by claimed ley-lines) removed;
    # the shipped behavior only checks the cell itself.
    return [item for lst in self.hori_lst for item in lst if item.isalpha()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def possible_moves(self):\n states = []\n possible_floors = self.possible_floors()\n possible_items = self.possible_items()\n\n for fl in possible_floors:\n for items in possible_items:\n new_floors = deepcopy(self.floors)\n for item in items:\n ...
[ "0.7898512", "0.7898361", "0.7858664", "0.7847656", "0.77333015", "0.7536234", "0.7532586", "0.7498494", "0.7485024", "0.73975164", "0.7325457", "0.7318799", "0.7273196", "0.717939", "0.71593887", "0.7139039", "0.71370745", "0.7127056", "0.71068263", "0.70893526", "0.70703286...
0.6583351
68
Return 'p1' if the current player is Player 1, and 'p2' if the current player is Player 2.
def get_current_player_name(self) -> str:
    """Return 'p1' if the current player is Player 1, and 'p2' if the
    current player is Player 2."""
    return 'p1' if self.p1_turn else 'p2'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def other_player(cls, player):\n return 0 if player == 1 else 1", "def checkval(self, P1, P2, winningval):\n if P1 == winningval:\n return \"Player 1\"\n elif P2 == winningval:\n return \"Player 2\"", "def get_current_player(player_one_turn):\n \n # Get appropri...
[ "0.7322427", "0.719487", "0.71915984", "0.71570975", "0.7128614", "0.7037134", "0.7009901", "0.69278514", "0.6833396", "0.6795355", "0.6771366", "0.6737287", "0.64921343", "0.6477016", "0.63291126", "0.63171285", "0.62973", "0.62973", "0.62973", "0.62903047", "0.6243002", "...
0.73625857
0
Return the GameState that results from applying move to this GameState.
def make_move(self, move: Any) -> 'StonehengeState':
    """Return the GameState that results from applying move to this GameState.

    Builds a brand-new StonehengeState with the turn flipped, deep-copies the
    board into it, marks the chosen cell with the mover's number, and
    re-evaluates every still-unclaimed ley-line.

    NOTE(review): a non-str move falls through the guard and implicitly
    returns None -- confirm callers always pass a validated string move.
    """
    if type(move) == str:
        # New state with the turn already flipped to the next player.
        new_state = StonehengeState(not self.p1_turn, self.side_length)
        # Deep-copy the board information from the current state so the new
        # state can be mutated without aliasing this state's inner lists.
        hori_lst_copy = []
        for lst in self.hori_lst:
            temp = []
            for item in lst:
                temp.append(item)
            hori_lst_copy.append(temp)
        left_lst_copy = []
        for lst in self.left_lst:
            temp = []
            for item in lst:
                temp.append(item)
            left_lst_copy.append(temp)
        right_lst_copy = []
        for lst in self.right_lst:
            temp = []
            for item in lst:
                temp.append(item)
            right_lst_copy.append(temp)
        hori_result_copy = []
        for item in self.hori_result:
            hori_result_copy.append(item)
        left_result_copy = []
        for item in self.left_result:
            left_result_copy.append(item)
        right_result_copy = []
        for item in self.right_result:
            right_result_copy.append(item)
        new_state.hori_lst = hori_lst_copy
        new_state.hori_result = hori_result_copy
        new_state.left_lst = left_lst_copy
        new_state.left_result = left_result_copy
        new_state.right_lst = right_lst_copy
        new_state.right_result = right_result_copy
        # Update the new state with the str move, using the three parallel
        # nested-list views of the board.
        lst = [new_state.hori_lst, new_state.left_lst, new_state.right_lst]
        result = [new_state.hori_result, new_state.left_result, new_state.right_result]
        # update the cell
        for i in range(len(lst)):
            for j in range(len(lst[i])):
                for k in range(len(lst[i][j])):
                    if lst[i][j][k] == move:
                        # new_state's turn flag already belongs to the NEXT
                        # player, so the mover's number is the opposite one.
                        if new_state.p1_turn:
                            lst[i][j][k] = "2"
                        else:
                            lst[i][j][k] = "1"
                # Update ley-line marks: the ley-line may belong to a
                # player after this move.
                p1_taken = 0
                p2_taken = 0
                # Already-claimed ley-lines stay with their owner.
                if result[i][j] != "@":
                    continue
                for item in lst[i][j]:
                    if item == "1":
                        p1_taken += 1
                    if item == "2":
                        p2_taken += 1
                # A player claims a ley-line by holding >= half of its cells.
                if float(p1_taken) >= len(lst[i][j]) / 2:
                    result[i][j] = "1"
                if float(p2_taken) >= len(lst[i][j]) / 2:
                    result[i][j] = "2"
        return new_state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_move(self, game_state: BotGameState) -> BotMove:\n return", "def result(self, board_state, move):\n # Create a copy of the current board state\n output_state = BoardState(other_state=board_state)\n # Swap pieces\n output_state.move_piece(move)\n # Eliminate piece...
[ "0.7312155", "0.72799444", "0.7227082", "0.71650064", "0.7120976", "0.6810444", "0.67236984", "0.66773134", "0.66662157", "0.66595423", "0.6642064", "0.6626734", "0.65865046", "0.65865046", "0.65685004", "0.65537447", "0.6546836", "0.65373135", "0.653719", "0.6526493", "0.652...
0.0
-1
Return whether move is a valid move for this GameState.
def is_valid_move(self, move: Any) -> bool:
    """Return whether move is a valid move for this GameState."""
    valid_moves = self.get_possible_moves()
    return move in valid_moves
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_valid_move(self, move):\n if self.game_state[move[0]][move[1]] is not None:\n return False\n return True", "def valid_bool(self):\n return bool(self.piece.validate_move(self.board, self))", "def is_valid_move(self, move):\n if type(move) == str:\n move...
[ "0.8282215", "0.8074329", "0.80526775", "0.7981014", "0.78986144", "0.78754056", "0.7871537", "0.78681695", "0.7756859", "0.7713244", "0.7703341", "0.76398927", "0.7610206", "0.75776756", "0.75525403", "0.75510347", "0.7432144", "0.73722756", "0.7332168", "0.72985274", "0.727...
0.8471639
0
Return a representation of this state (which can be used for equality testing).
def __repr__(self) -> Any:
    """Return a representation of this state (which can be used for
    equality testing): the drawn board followed by whose turn it is."""
    return "{}\nIs p1 the current player? {}".format(str(self), str(self.p1_turn))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def state(self):\n return str(self)", "def state(self):\r\n return str(self)", "def state(self):\n\n\t\treturn str(self)", "def __repr__( self ):\n\n return self.__class__.__name__ + \"( \" + repr(self.state) + \")\";", "def __repr__(self):\r\n r = str(self.current_instance_state())...
[ "0.81590044", "0.8067545", "0.7992787", "0.76251036", "0.75809264", "0.7424345", "0.7290553", "0.7284625", "0.7240699", "0.7135407", "0.7102532", "0.70908487", "0.7075042", "0.7063691", "0.7046428", "0.7046428", "0.7046428", "0.7046428", "0.7046428", "0.6999811", "0.6971935",...
0.0
-1
Return an estimate in interval [LOSE, WIN] of best outcome the current player can guarantee from state self.
def rough_outcome(self) -> float:
    """Return an estimate in interval [LOSE, WIN] of best outcome the current
    player can guarantee from state self.

    The previous implementation was an unfinished stub that fell through and
    returned None, which breaks any caller comparing the estimate against
    LOSE/WIN.  Until a real two-ply lookahead is implemented, return the
    neutral estimate 0 (neither a guaranteed win nor a guaranteed loss).
    """
    # NOTE(review): a proper heuristic should return 1 for an immediate win,
    # -1 when every reachable state lets the opponent win, and 0 otherwise.
    return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rough_outcome(self) -> float:\n if is_win(self):\n return 1\n elif is_lose(self):\n return -1\n return 0", "def rough_outcome_strategy(game: Any) -> Any:\n current_state = game.current_state\n best_move = None\n best_outcome = -2 # Temporarily -- just so w...
[ "0.7338502", "0.7059583", "0.7059583", "0.7059583", "0.7020939", "0.6829376", "0.6702647", "0.6694379", "0.66560304", "0.6624065", "0.6616302", "0.6588654", "0.6566183", "0.6555869", "0.6547012", "0.6531186", "0.6518109", "0.6501664", "0.6488798", "0.6458663", "0.6427033", ...
0.73625016
0
Initialize this Game, using p1_starts to find who the first player is.
def __init__(self, p1_starts: bool) -> None:
    """Initialize this Game, using p1_starts to find who the first player is.

    Prompts on stdin for the board's side length and builds the initial
    StonehengeState from it.
    """
    # NOTE(review): the input is not validated -- a non-integer entry raises
    # ValueError here; consider re-prompting instead.
    side_length = int(input("Enter the side length of the board: "))
    self.current_state = StonehengeState(p1_starts, side_length)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def initGame(self):\n self.map = {}\n self.blocks = Group()\n self.C...
[ "0.6787327", "0.64845306", "0.64357686", "0.6349507", "0.62722546", "0.626561", "0.6244119", "0.61926365", "0.61749655", "0.61128175", "0.6096133", "0.6081823", "0.606986", "0.6065711", "0.6050611", "0.5989051", "0.59888893", "0.59368724", "0.592886", "0.5927789", "0.59158814...
0.5549503
75
Return the instructions for this Game.
def get_instructions(self) -> str:
    """Return the instructions for this Game.

    The previous body was placeholder text ("Duck need to fill this
    blank..."); replaced with real instructions describing the rules the
    rest of this file implements (half-of-ley-line capture, half-of-all-
    ley-lines victory).
    """
    instruction = ("Players take turns claiming cells on the board. "
                   "When a player captures at least half of the cells "
                   "in a ley-line, that player claims the ley-line. "
                   "The first player to claim at least half of all "
                   "ley-lines wins the game.")
    return instruction
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instructions(self) -> str:\n instructions = \"Players take turns to occupy available positions \" \\\n \"on the \" \\\n \"board. Once half or more of a ley-line has been \" \\\n \"occupied\" \\\n \"one player, th...
[ "0.77113837", "0.7536874", "0.74031675", "0.6759023", "0.6670727", "0.6315557", "0.5983526", "0.5832342", "0.5808417", "0.57887095", "0.5746708", "0.5729109", "0.56965", "0.5680344", "0.5596628", "0.55811346", "0.55728334", "0.5472555", "0.5458725", "0.54577094", "0.5449291",...
0.66112
5
Return whether or not this game is over at state.
def is_over(self, state: StonehengeState) -> bool:
    """Return whether or not this game is over at state.

    The game ends as soon as either player has claimed at least half of
    all ley-lines (horizontal, left-diagonal and right-diagonal combined).
    Claimed ley-lines are marked '1' or '2' in the result lists.
    """
    total_result = state.hori_result + state.left_result + state.right_result
    total_line = len(total_result)
    # list.count replaces the hand-rolled tally loop; the redundant float()
    # casts and commented-out debug prints were removed (Python 3 '/' is
    # already true division).
    p1_taken = total_result.count('1')
    p2_taken = total_result.count('2')
    return p1_taken >= total_line / 2 or p2_taken >= total_line / 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def game_over(self) -> bool:\n return self.rstate.game_over()", "def is_over(self):\n return self.game.is_over()", "def is_game_over(self) -> bool:\n return self._is_game_over", "def game_over(self):\n # TODO: Define the game over condition for Adventure.\n # use self.over ...
[ "0.8707897", "0.8566029", "0.8421653", "0.8119031", "0.8112403", "0.81087774", "0.80789787", "0.79621136", "0.7939655", "0.78865045", "0.7864895", "0.7863609", "0.784667", "0.7836293", "0.78253675", "0.7812383", "0.76985806", "0.7698057", "0.7644452", "0.76256573", "0.7624914...
0.67353415
64
Return whether player has won the game.
def is_winner(self, player: str) -> bool:
    """Return whether player has won the game.

    player is 'p1' or 'p2'; any value other than 'p1' is checked against
    Player 2's tally (preserving the original else-branch behavior).  A
    player wins by claiming at least half of all ley-lines.
    """
    state = self.current_state
    total_result = state.hori_result + state.left_result + state.right_result
    total_line = len(total_result)
    # list.count replaces the hand-rolled tally loop; redundant float()
    # casts removed (Python 3 '/' is already true division).
    mark = '1' if player == "p1" else '2'
    return total_result.count(mark) >= total_line / 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_game_won(self):\n return True", "def has_won(board, player):\r\n return False", "def has_won(board, player):\n return False", "def player_has_won(self):\n return len(self._words_guessed) == self._num_words", "def is_game_won(self):\n if self.game_is_tied():\n re...
[ "0.8601931", "0.8356279", "0.83288485", "0.81141126", "0.80790204", "0.78687596", "0.78639466", "0.7839697", "0.7838136", "0.76941895", "0.7626411", "0.75731826", "0.754925", "0.74468017", "0.74358803", "0.74049085", "0.7379903", "0.7379802", "0.7361152", "0.7361038", "0.7336...
0.69783485
42
Return the move that string represents. If string is not a move, return some invalid move.
def str_to_move(self, str1: str) -> Any:
    """Return the move that string represents.  If string is not a move,
    return some invalid move (-1)."""
    candidate = str1.strip()
    if candidate.isalpha():
        return candidate
    return -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def str_to_move(self, string):\n if not string.strip().isalpha():\n return -1\n return string.strip()", "def str_to_move(self, string):\n if not string.strip().isdigit():\n return -1\n\n return int(string.strip())", "def get_move() -> str:\n msg = 'Enter a m...
[ "0.7124746", "0.7086057", "0.6942887", "0.6783822", "0.65344757", "0.6471586", "0.64489573", "0.6421416", "0.63550556", "0.62944734", "0.6270465", "0.6264515", "0.6182051", "0.60790235", "0.60481", "0.60470694", "0.60350966", "0.601529", "0.5998565", "0.5966165", "0.59147376"...
0.65770066
4
Test that the transform function works for binary data
def test_CredL2_1b1_transform_smoketest_transformer_stack_binary(self):
    """Test that the transform function works for binary data.

    Smoke-tests fit / transform / transformer_stack of CredL2_1b1 on a
    5-column categorical frame with an imbalanced binary target, both with
    and without a sample-weight column.  Expects 15 output columns: 5
    credibility-encoded originals plus the 10 pairwise interactions.
    """
    # Deterministic categorical design matrix: 100 rows x 5 columns.
    np.random.seed(2525)
    Xdata = np.random.choice(np.array(['A', 'B', 'C', 'D']), size=(100, 5), p=[0.5, 0.3, 0.1, 0.1])
    orig_columns = ['ColA', 'ColB', 'ColC', 'ColD', 'ColE']
    Xdf = pd.DataFrame(Xdata, columns=orig_columns)
    Container_Xdf = Container(Xdf)
    # Imbalanced binary target (10% positives), reseeded for determinism.
    np.random.seed(2525)
    yData = np.random.choice([0, 1], size=(100, 1), p=[0.9, 0.1]).ravel()
    conv = CredL2_1b1('rgl_C=[0.01,0.1];cmin=4;i2w=1')
    Z = Partition(size=100, folds=5, reps=5)
    Z.set(max_reps=1, max_folds=0)
    # Run once without and once with a (unit) weight column.
    for weight in [False, True]:
        if weight:
            Container_Xdf.initialize({'weight': pandas.Series(np.ones(100))})
        out = conv.fit(Container_Xdf, yData, Z)
        self.assertIsInstance(out, CredL2_1b1)
        out = conv.transform(Container_Xdf, yData, Z)
        self.assertIsInstance(out, Container)
        # k=-1, r=0 selects the full (non-fold) partition slice.
        p = {'k': -1, 'r': 0}
        self.assertEqual(out(**p).shape[0], Xdf.shape[0])
        self.assertEqual(out(**p).shape[1], 15)
        self.assertEqual(out.colnames(**p), ['DR_cred_' + i for i in orig_columns] + [
            'DR_cred_ColA_XX_ColB', 'DR_cred_ColA_XX_ColC', 'DR_cred_ColA_XX_ColD',
            'DR_cred_ColA_XX_ColE', 'DR_cred_ColB_XX_ColC', 'DR_cred_ColB_XX_ColD',
            'DR_cred_ColB_XX_ColE', 'DR_cred_ColC_XX_ColD', 'DR_cred_ColC_XX_ColE',
            'DR_cred_ColD_XX_ColE'])
        # transformer_stack must produce the same shape/columns per fold.
        out = conv.transformer_stack(Container_Xdf, yData, Z)
        self.assertIsInstance(out, Container)
        for p in Z:
            self.assertEqual(out(**p).shape[0], Xdf.shape[0])
            self.assertEqual(out(**p).shape[1], 15)
            self.assertEqual(out.colnames(**p), ['DR_cred_' + i for i in orig_columns] + [
                'DR_cred_ColA_XX_ColB', 'DR_cred_ColA_XX_ColC', 'DR_cred_ColA_XX_ColD',
                'DR_cred_ColA_XX_ColE', 'DR_cred_ColB_XX_ColC', 'DR_cred_ColB_XX_ColD',
                'DR_cred_ColB_XX_ColE', 'DR_cred_ColC_XX_ColD', 'DR_cred_ColC_XX_ColE',
                'DR_cred_ColD_XX_ColE'])
        # Weight column must pass through the stack unchanged.
        if weight:
            self.assertEqual(np.all(out.get('weight') == np.ones(100)), True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_transform(self):\n t = OneHotEncode(3)\n assert numpy.all(t.transform(0) == numpy.array((1.0, 0.0, 0.0)))\n assert numpy.all(t.transform(1) == numpy.array((0.0, 1.0, 0.0)))\n assert numpy.all(t.transform(2) == numpy.array((0.0, 0.0, 1.0)))\n with pytest.raises(AssertionE...
[ "0.68057483", "0.65271544", "0.64854556", "0.6451207", "0.637159", "0.6202632", "0.6188656", "0.6060267", "0.60558635", "0.60416764", "0.60387063", "0.60317004", "0.6017776", "0.59574693", "0.59446365", "0.59305394", "0.590178", "0.5877073", "0.5847465", "0.5844245", "0.58187...
0.0
-1
Test that the transform functions work for regression data
def test_CredL2_1b1_smoketest_transformer_stack_regression(self):
    """Test that the transform functions work for regression data.

    Smoke-tests fit / transform / transformer_stack of CredL2_1b1 on a
    5-column categorical frame with a skewed non-negative target, both with
    and without a sample-weight column.  No interaction terms here, so the
    output has exactly one credibility column per original column.

    Fix: the weight column was initialized twice back-to-back; the
    duplicate Container_Xdf.initialize call was removed.
    """
    # Deterministic categorical design matrix: 100 rows x 5 columns.
    np.random.seed(2525)
    Xdata = np.random.choice(np.array(['A', 'B', 'C', 'D']), size=(100, 5), p=[0.5, 0.3, 0.1, 0.1])
    orig_columns = ['ColA', 'ColB', 'ColC', 'ColD', 'ColE']
    Xdf = pd.DataFrame(Xdata, columns=orig_columns)
    Container_Xdf = Container(Xdf)
    # Skewed non-negative target to mimic regression/count data.
    np.random.seed(2525)
    yData = np.random.choice([0, 1, 2, 3, 4, 5, 6, 7], size=100,
                             p=[0.7/2.22, 0.5/2.22, 0.35/2.22, 0.25/2.22,
                                0.17/2.22, 0.12/2.22, 0.08/2.22, 0.05/2.22])
    conv = CredL2_1b1('rgl_a=[0.01,0.1];cmin=4')
    Z = Partition(size=100, folds=5, reps=5)
    Z.set(max_reps=1, max_folds=0)
    # Run once without and once with a (unit) weight column.
    for weight in [False, True]:
        if weight:
            Container_Xdf.initialize({'weight': pandas.Series(np.ones(100))})
        out = conv.fit(Container_Xdf, yData, Z)
        self.assertIsInstance(out, CredL2_1b1)
        out = conv.transform(Container_Xdf, yData, Z)
        self.assertIsInstance(out, Container)
        for p in Z:
            self.assertEqual(out(**p).shape[0], Xdf.shape[0])
            self.assertEqual(out(**p).shape[1], len(orig_columns))
            self.assertEqual(out.colnames(**p), ['DR_cred_' + i for i in orig_columns])
        out = conv.transformer_stack(Container_Xdf, yData, Z)
        self.assertIsInstance(out, Container)
        for p in Z:
            self.assertEqual(out(**p).shape[0], Xdf.shape[0])
            self.assertEqual(out(**p).shape[1], len(orig_columns))
            self.assertEqual(out.colnames(**p), ['DR_cred_' + i for i in orig_columns])
        # Weight column must pass through the stack unchanged.
        if weight:
            self.assertEqual(np.all(out.get('weight') == np.ones(100)), True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_transform_interface_repr(example_tsds: TSDataset) -> None:\n trend_transform = TrendTransform(in_column=\"target\", detrend_model=LinearRegression(), model=\"rbf\")\n out_column = f\"regressor_{trend_transform.__repr__()}\"\n result = trend_transform.fit_transform(example_tsds.df)\n for seg in...
[ "0.64185077", "0.6215986", "0.6215151", "0.6212859", "0.6164075", "0.60881317", "0.6073903", "0.60206157", "0.60000205", "0.5982403", "0.5937053", "0.5922052", "0.59196836", "0.5906585", "0.58948827", "0.5866362", "0.5859663", "0.5814201", "0.5783901", "0.57772166", "0.577146...
0.0
-1
Reference values taken from NIST data base
def test_transport_sanity(self):
    """Sanity-check fitted transport properties for N2 at 400 K.

    Reference values taken from NIST data base.  Compares Sutherland,
    polynomial, and log-polynomial fits of viscosity (mu) and thermal
    conductivity (kappa) against the reference within loose relative
    tolerances -- a scale check, not a precision check.
    """
    T = 400
    # Molar heat capacity and molar mass of N2.
    # NOTE(review): units appear to be J/(kmol K) and kg/kmol -- confirm
    # against tr_fitter.euken's expectations.
    cv_mole, W = 21005.045895231186, 28.014
    species_name = "N2"
    data = ct_properties.ctThermoTransport("gri30.cti", verbose=False)
    data.evaluate_properties()
    i = data.gas.species_index(species_name)
    # Fit all transport models once; index the per-species coefficients.
    As, Ts, _, poly_mu, poly_kappa, log_poly_mu, log_poly_kappa= ct2foam_utils.fit_ct_transport(data)
    mu_s = tr_fitter.sutherland(T, As[i], Ts[i])
    kappa_s=tr_fitter.euken(mu_s, cv_mole, W, R)
    mu_logp, kappa_logp = tr_fitter.eval_log_polynomial(log_poly_mu[i,:], log_poly_kappa[i,:], T)
    mu_p, kappa_p = tr_fitter.eval_polynomial(poly_mu[i,:], poly_kappa[i,:], T)
    # rough test whether they are in the same scale...
    mu_ref = 2.2217e-5
    kappa_ref = 0.032205
    # Sutherland/Euken fits get looser tolerances than the polynomial fits.
    self.assertTrue(np.abs(mu_s-mu_ref)/np.abs(mu_ref) < 0.07)
    self.assertTrue(np.abs(mu_p-mu_ref)/np.abs(mu_ref) < 0.01)
    self.assertTrue(np.abs(mu_logp-mu_ref)/np.abs(mu_ref) < 0.01)
    self.assertTrue(np.abs(kappa_s-kappa_ref)/np.abs(kappa_ref) < 0.05)
    self.assertTrue(np.abs(kappa_p-kappa_ref)/np.abs(kappa_ref) < 0.05)
    self.assertTrue(np.abs(kappa_logp-kappa_ref)/np.abs(kappa_ref) < 0.05)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def referenzdaten_einlesen(self):\n self.refdata = np.genfromtxt(self.referencefile, skip_header=1, usecols=np.asarray(self.referenzspalte))\n self.Referencedata = Reference()", "def prep_reference(self):\n\n # if basin\n if self.config.metric == 'basin':\n df = pd.read_csv...
[ "0.5812446", "0.57540804", "0.5708854", "0.56447667", "0.56308633", "0.5510093", "0.5462862", "0.54256", "0.53346974", "0.5308915", "0.52702427", "0.52666277", "0.52629995", "0.52512944", "0.52163595", "0.52109677", "0.5196691", "0.519309", "0.51905435", "0.51647764", "0.5161...
0.0
-1
This function help to build data that we need to train for CharCNN
def build_dataset(self, data_path, test_size): dataset, label_dataset = self.load_dataset(data_path) # shuffle dataset, label_dataset = shuffle(dataset, label_dataset, random_state = 2111) # split data size = int(len(dataset) * (1 - test_size)) self.x_train = dataset[:size] self.x_val = dataset[size:] self.y_train = np.array(label_dataset[:size]) self.y_val = np.array(label_dataset[size:]) self.vocab_size = len(self.x_train) # build tokenizer self.tokenizer = self.build_tokenizer(self.x_train, self.vocab_size) # Saving Tokenizer print('=============Saving Tokenizer================') print('Begin...') if not os.path.exists(self.vocab_folder): try: os.makedirs(self.vocab_folder) except OSError as e: raise IOError("Failed to create folders") tokenizer_json = self.tokenizer.to_json() with io.open(self.save_tokenizer_path, 'w', encoding= 'utf-8') as f: f.write(json.dumps(tokenizer_json, ensure_ascii= False)) print('Done!!!') # Saving label dict with open('label.json', 'w') as f: json.dump(self.label_dict, f) # get max_len self.max_len = self.get_max_len(self.x_train) # tokenizing self.x_train = np.array(self.tokenize(self.tokenizer, self.x_train, self.max_len)) self.x_val = np.array(self.tokenize(self.tokenizer,self.x_val, self.max_len)) return self.x_train, self.x_val, self.y_train, self.y_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test...
[ "0.68234545", "0.6755744", "0.6743118", "0.6716415", "0.67143404", "0.6676263", "0.66579074", "0.65451914", "0.6450987", "0.6438156", "0.639055", "0.6383469", "0.6370643", "0.63556325", "0.63493556", "0.63293546", "0.6261715", "0.62448275", "0.6217389", "0.621433", "0.6206522...
0.61662716
23
Set common fields in layer to the addressing dictionary.
def set_address_values(layer):
    """Set common fields in layer into the module-level addressing dictionary.

    For each key of address_dict whose value is still None, copies the value
    of the same-named field from the first row of layer that has that field.
    Already-populated keys are never overwritten.
    """
    # The field list is a property of the layer, not of a row: compute it
    # once instead of calling arcpy.ListFields on every row (the original
    # rebuilt it per row), and collect just the names.
    layer_fields = [field.name for field in arcpy.ListFields(layer)]
    cursor = arcpy.SearchCursor(layer)
    for row in cursor:
        for key in address_dict:
            if key in layer_fields and address_dict.get(key) is None:
                address_dict[key] = row.getValue(key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_attrs(self, **kwargs) -> None:\n self._obj.coords[GEO_MAP_COORD].attrs.update(**kwargs)", "def update_asop_dict(asop_dict,region,coords,color,all_settings):\n # Set unique color\n asop_dict['color'] = color\n\n # Apply any general user settings\n asop_dict['grid_desc'] = all_settings.g...
[ "0.57628345", "0.55964243", "0.55274314", "0.5520741", "0.5513947", "0.54532826", "0.54528964", "0.5352825", "0.53380233", "0.52978295", "0.52767116", "0.5263793", "0.5233308", "0.52296996", "0.51820993", "0.51604235", "0.51503444", "0.51213574", "0.5110379", "0.51047695", "0...
0.6680822
0
Only function in class.
def __init__(self, selected_address, side, ST_PREFIX, axis):
    """Compute a house number for selected_address from the addressing grid.

    Selects the neighboring grid cell one band (1000 axis units) below the
    address's axis value, measures the distance from the address to it, and
    derives HOUSENUM = axis value + parity-adjusted distance.

    NOTE(review): assumptions below (feet-per-mile conversion, even/odd
    parity by side of street) are inferred from the arithmetic -- confirm
    against the addressing scheme's documentation.
    """
    # E/W prefixes address along the X axis; everything else along Y.
    # axis is assumed to be (x_val, y_val, x_dir, y_dir).
    if ST_PREFIX == "W" or ST_PREFIX == "E":
        self.axis_val = int(axis[0])
        self.axis_dir = axis[2]
        self.Neighbor_GRID_Val = str(self.axis_val - 1000)
        self.axis_val_field = "AxisX_Val"
        self.axis_dir_field = "AxisX_Dir"
    else:
        self.axis_val = int(axis[1])
        self.axis_dir = axis[3]
        self.Neighbor_GRID_Val = str(self.axis_val - 1000)
        self.axis_val_field = "AxisY_Val"
        self.axis_dir_field = "AxisY_Dir"
    # Attribute query matching the neighboring grid band and direction.
    self.selection = (
        "{0}='{1}' AND {2}='{3}'".format(
            self.axis_val_field, self.Neighbor_GRID_Val,
            self.axis_dir_field, ST_PREFIX
        )
    )
    arcpy.SelectLayerByAttribute_management(
        "Addressing_Grid", "NEW_SELECTION", self.selection
    )
    arcpy.CopyFeatures_management("Addressing_Grid", "neighbor_grid")
    # Distance from the address to the neighboring grid feature.
    self.near_table = arcpy.GenerateNearTable_analysis(
        in_features=selected_address,
        near_features="neighbor_grid",
        out_table="Near_Table"
    )
    self.cursor = arcpy.SearchCursor(self.near_table)
    Address_Dist = 0
    # Only the last row's distance is kept (near table normally has one row).
    for row in self.cursor:
        # NEAR_DIST presumably in feet: /5280 converts to miles, *1000 to
        # address units per mile -- TODO confirm units.
        Address_Dist = int((row.getValue("NEAR_DIST") / 5280) * 1000)
    # Parity convention: E/N sides of the street get odd numbers,
    # W/S sides get even numbers.
    if side == "E" or side == "N":
        EorO_1 = "O"
    else:
        EorO_1 = "E"
    if (Address_Dist % 2) == 0:
        EorO_2 = "E"
    else:
        EorO_2 = "O"
    # Bump the distance by one when its parity disagrees with the side.
    if EorO_1 == EorO_2:
        pass
    else:
        Address_Dist = (Address_Dist + 1)
    self.Address_Dist = Address_Dist
    self.HOUSENUM = self.axis_val + self.Address_Dist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def func_in_class():\n return \"just a function hanging out in a class\"", "def someMethod (self):\n pass", "def __call__(self):\n pass", "def __call__(self):\n pass", "def independent_function():\n print(\"calling a function in exampleclass.py file that deos not belong to a ...
[ "0.7975919", "0.6815057", "0.6792388", "0.6792388", "0.67780167", "0.6729474", "0.6686155", "0.6603621", "0.6547703", "0.6465931", "0.6423527", "0.639462", "0.6383629", "0.6363027", "0.6363027", "0.6363027", "0.6363027", "0.6363027", "0.63594645", "0.63594645", "0.63387555", ...
0.0
-1
Get AWS ECS task information. For the puspose of getting the EC2 instance id by a given AWS ECS task name, for now, only the 'containerInstanceArn' is fetched from the AWS ECS task.
def get_tasks_information( task: str, list_tasks: str, cluster=CLUSTER_NAME, client=None, region=REGION, ): if not client: session = boto3.session.Session() client = session.client("ecs", region) try: # Get all tasks in the cluster. cluster_tasks = client.list_tasks(cluster=cluster)["taskArns"] logger.debug(f"[CLUSTERTASKS]: '{cluster_tasks}'.") tasks = client.describe_tasks(cluster=cluster, tasks=cluster_tasks)[ "tasks" ] logger.debug(f"[TASKS]: '{tasks}'.") # Filter for given task name. # Get instance id, container_instances = [] task_name = "" for task_ in tasks: task_definition = task_.get("taskDefinitionArn", "") if list_tasks: container_instances.append(task_definition) continue container_instance_arn = task_.get("containerInstanceArn", None) if container_instance_arn: if not list_tasks: if re.search(task, task_definition): container_instances.append(container_instance_arn) task_name = task_definition break else: container_instances.append(container_instance_arn) if list_tasks: return "\n".join(container_instances) instances = describe_instances_with_cluster( container_instances=container_instances, cluster=cluster, client=client, region=region, ) if not instances: return "" logger.info(f"Instance '{instances[0]}' runs task '{task_name}'.") return instances[0] except (botocore.exceptions.ClientError) as e: # TODO: Check right error code. if e.response["Error"]["Code"] == "ClusterNotFoundException": logger.error(f"Cluster '{cluster}' not found: {str(e)}.") else: logger.error(f"Error: {str(e)}") sys.exit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(profile, cluster, tasks):\n client = boto3client.get(\"ecs\", profile)\n params = {}\n params[\"cluster\"] = cluster\n params[\"tasks\"] = tasks\n return client.describe_tasks(**params)", "def get_task_details(self) -> task.TaskMetadata:\n return task.TaskMetadata(\n name...
[ "0.6348864", "0.598766", "0.5797471", "0.5780299", "0.57383114", "0.56886894", "0.56886894", "0.56886894", "0.56886894", "0.5631942", "0.55174756", "0.5450972", "0.5419298", "0.54183954", "0.54114175", "0.5400899", "0.53903484", "0.53873485", "0.5380392", "0.53680474", "0.536...
0.68460375
0
Provide cli arguments. When used as executable script with command line options.
def main(): # import aws_ecs_services.arguments as arguments from .arguments import get_cli_arguments # args = arguments.get_cli_arguments() args = get_cli_arguments() by_service_dns = False by_service_name = False by_task_name = False list_clusters = False only_cluster_instances = False only_ec2_instances = False list_running_services = False list_running_tasks = False list_services = False list_projects = False use_config = False debug = args.debug if debug: logger.setLevel(logging.DEBUG) logger.debug("Show DEBUG information.") stream_handler = logging.StreamHandler(sys.stdout) formatter = logging.Formatter(f"%(lineno)s: {logging.BASIC_FORMAT}") stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) logger.propagate = False else: logger.setLevel(logging.INFO) # If a configuration file and a project are given,the configruation file is used. # Otherwise the cli ooptions are considerd. project = args.project # Variable replacement in config file uses '{service}'. service = args.service config = args.config if ( os.path.exists(config) and project or args.subcommand in ("list-configured-projects", "list-configured-services") ): logger.info(f"Loading config from: '{config}'.") if not os.path.exists(config): logger.error(f"No config file: '{config}'.") return 1 use_config = True if use_config: data = None try: with open(config, "r") as config_file: data = json.load(config_file) except (ValueError) as e: logger.error( f"Check the JSON sytanx in the config file '{config}': '{str(e)}'" ) return 1 logger.debug(f"Data: {data}") if not data or not isinstance(data, dict): logger.error(f"Could not load configuration: '{data}'.") return 1 if use_config: region = data.get("region", args.region) else: region = args.region if use_config: projects = data.get("projects", {}) if args.subcommand not in ("list-configured-projects"): if project not in projects: logger.error( f"Missing configuration for project: '{project}'. Choose from {list(projects.keys())}." 
) return 1 project_config = projects.get(project, None) if not project_config: logger.error( f"Missing configuration for project: '{project}'. Choose from {list(projects.keys())}." ) return 1 region = project_config.get("region", region) cluster_name = project_config.get("cluster", "") # Variable replacement in config file uses '{cluster}'. cluster = cluster_name cluster_ = cluster # Get service-specific configuration. services = project_config.get("services", {}) service_config = None if services: service_config = services.get(service, None) logger.debug(f"Service config: {service_config}") if service_config: cluster_ = service_config.get("cluster", cluster_name) cluster_name = replace_config(cluster_, "cluster", locals()) else: cluster_name = args.cluster logger.info(f"Working in: {region}") session = boto3.session.Session() ecs_client = session.client("ecs", region) ec2_client = session.client("ec2", region) ssm_client = session.client("ssm", region) if args.subcommand == "by-service-dns": by_service_dns = True if use_config: service_dns = project_config.get("dns", "") service_dns_ = service_dns if service_config: service_dns_ = service_config.get("dns", service_dns) service_dns = replace_config(service_dns_, "service_dns", locals()) else: service_dns = args.dns if not service_dns: logger.error(f"DNS name missing.") return 1 output_info = args.output elif args.subcommand == "by-service-name": by_service_name = True if use_config: service_name = project_config.get("name", "") service_name_ = service_name if service_config: service_name_ = service_config.get("name", service_name) service_name = replace_config( service_name_, "service_name", locals() ) service_name = service_name if service_name else service else: service_name = args.name elif args.subcommand == "by-task-name": by_task_name = True if use_config: task_name = project_config.get("name", "") task_name_ = task_name if service_config: task_name_ = service_config.get("name", task_name) task_name = 
replace_config(task_name_, "task_name", locals()) task_name = task_name if task_name else service else: task_name = args.name elif args.subcommand == "list-ec2-instances": only_ec2_instances = True elif args.subcommand == "list-clusters": list_clusters = True elif args.subcommand == "list-instances": only_cluster_instances = True elif args.subcommand == "list-services": list_running_services = True service_name = None elif args.subcommand == "list-tasks": list_running_tasks = True task_name = None elif args.subcommand == "list-configured-services": list_services = True service_name = None elif args.subcommand == "list-configured-projects": list_projects = True service_name = None if list_projects: if not use_config: logger.error("Only available when using a configuration file.") return 1 if not projects: logger.error( "Could not load projects from configuration file: '{config}'." ) return 1 print(f"Found in {config}.") print(*list(projects.keys()), sep="\n") return # No 'cluster' necessary for 'list-clusters'. if not list_clusters and not only_ec2_instances and not cluster_name: logger.error(f"Cluster name missing.") return 1 if list_services: if not use_config: logger.error("Only available when using a configuration file.") return 1 if not services: logger.error( "Could not load services from configuration file: '{config}'." 
) return 1 print(f"Found in {config}.") print(*services, sep="\n") return elif only_ec2_instances: instances = get_instances_form_ec2(client=ec2_client) print(json.dumps(instances)) return elif list_clusters: clusters = get_clusters(client=ecs_client) print("\n".join(clusters)) return elif only_cluster_instances: logger.info(f"Checking cluster: {cluster_name}") instance_ids = get_instance_ids_from_cluster( cluster=cluster_name, client=ecs_client ) print(" ".join(instance_ids)) return elif by_service_name or list_running_services: logger.info(f"Checking cluster: {cluster_name}") instance_ids = get_instance_ids_from_cluster( cluster=cluster_name, client=ecs_client ) instance_id = get_instance_id_by_service_name( instance_ids=instance_ids, service=service_name, list_services=list_running_services, client=ssm_client, region=region, ) return elif by_task_name or list_running_tasks: logger.info(f"Checking cluster: {cluster_name}") instance_ids = get_tasks_information( task=task_name, list_tasks=list_running_tasks, cluster=cluster_name, client=ecs_client, ) print(instance_ids) return elif by_service_dns: logger.info(f"Checking cluster: {cluster_name}") service_ip = get_host_ip(host_name=service_dns) logger.info(f"IP of {service_dns} is {service_ip}") logger.debug(f"Output: {output_info}.") if output_info == "service": print(service_ip) return else: logger.debug(f"Get instance IDs for cluster:' {cluster_name}'.") instance_ids = get_instance_ids_from_cluster( cluster=cluster_name, client=ecs_client ) logger.debug(instance_ids) logger.debug("Get instance details.") ( instance_private_ip, instance_private_dns, instance_id, ) = get_instance_info_by_service_dns( instance_ids=instance_ids, service_ip=service_ip, client=ec2_client, ) if output_info == "ip": print(instance_private_ip) return elif output_info == "id": print(instance_id) return elif output_info == "all": print(instance_private_ip, instance_id, instance_private_dns) return logger.error(f"Not the expected result - 
nothing accomplished.") return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cli_arguments(self):\n pass", "def command_line_arguments():\n\n try:\n parser = argparse.ArgumentParser(description='Log Handler/Cleaner/Copier for Idemia DocAuth')\n\n # Add required arguments.\n parser.add_argument('action', choices=['clean', 'download'], type=str, help=...
[ "0.7795223", "0.7400118", "0.7389782", "0.73847723", "0.7258737", "0.7255228", "0.7248011", "0.724467", "0.7233422", "0.717214", "0.710672", "0.7086328", "0.7038673", "0.70361656", "0.7034294", "0.70218986", "0.68577576", "0.6856456", "0.6839717", "0.6820217", "0.68195426", ...
0.0
-1
Perform all graphical tasks for this frame.
def draw(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render(self):\n self.delete()\n self.__create_background(self._imfname)\n # XXX must be last after successor implementation, but works without this line\n #self.c.event_generate(\"<Configure>\")\n #self.c.update_idletasks()", "def __display(self):\n self.__rotate_mod...
[ "0.6894054", "0.67451495", "0.66122764", "0.6550248", "0.64933664", "0.64838165", "0.64499927", "0.64354634", "0.6388672", "0.638855", "0.6348826", "0.6347117", "0.6344852", "0.63217235", "0.6315397", "0.6307719", "0.6287468", "0.62729156", "0.62716115", "0.6268537", "0.62639...
0.60316306
54
Perform all calculations for the amount of time that has passed.
def update(self, dt): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def compute(self, duration: Optional[Number]) -> None:\n await envs.sleep(duration)\n self.total_compute_time += duration", "def _timed_execute(self):\n tstart = time.perf_counter()\n self._func(*self._func_args, **self._func_kwargs)\n tend = time.perf_counter() \n\n ...
[ "0.675693", "0.64293903", "0.6368792", "0.61246926", "0.6124316", "0.6109275", "0.6093881", "0.6072631", "0.6060954", "0.60540646", "0.60454965", "0.6036691", "0.6011635", "0.6011255", "0.59790087", "0.5965647", "0.5960139", "0.59283966", "0.5919275", "0.59172326", "0.5911589...
0.0
-1
automatically release blocks when blocks dict looses shape
def free_finalizer(self, dataset: dict): # for gc being late if dataset: if dataset['vrtx']: dataset['vrtx'].release() if dataset['indx']: dataset['indx'].release() dataset.clear()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def finalize_block_construction(self, pyomo_block):\n pass", "def blocks(self):\n pass", "def _prepare_blocks():\n\n counter = blocks[0]['freeStart']\n maxBlocks = blocks[0]['maxBlocks']\n while(counter < maxBlocks) :\n try:\n # print (mount['parent'] + '/linddata.' + s...
[ "0.69011474", "0.6140728", "0.601313", "0.59879786", "0.59604454", "0.58850497", "0.581188", "0.58049697", "0.5709238", "0.56697476", "0.5665943", "0.5652778", "0.5647419", "0.5641385", "0.5623683", "0.5622437", "0.56117296", "0.55557007", "0.5543001", "0.55420923", "0.554209...
0.50894725
94
Returns the details for a given plugin.
def load_details(self): response = self._server._api_request("GET", "/plugins/plugin/%d" % self.id, "") if response is not None: self.id = response["id"] self.name = response["name"] self.family_name = response["family_name"] self.attributes = response["attributes"] return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPluginInfo(pluginId):\n url = f\"https://develop.roblox.com/v1/plugins?pluginIds={pluginId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j['data']", "def get_details(self):\n return PluginDetails(\n plugin_name=\"bad-string-detail-is-int\",\n ...
[ "0.72806084", "0.71445906", "0.7018347", "0.6907199", "0.6841461", "0.683811", "0.67773193", "0.6708696", "0.6622913", "0.6553286", "0.6524269", "0.6480507", "0.6399457", "0.63910574", "0.6374219", "0.63439155", "0.63216966", "0.630637", "0.62978387", "0.62750924", "0.626016"...
0.6764841
7
Geeft bericht of iemand lang genoeg is voor de attractie.
def lang_genoeg(lengte): return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def substantiate():", "def cliquer_sur_unité(self):", "def makeGerund(self):\r\n clean_s = self.cleanString(self.text)\r\n LoW = clean_s.split()\r\n for x in LoW: \r\n if 'ing' in x and x not in self.gerund: \r\n self.gerund[x] = 1\r\n elif 'ing' in x a...
[ "0.6085675", "0.60812867", "0.5822387", "0.5812007", "0.56937695", "0.56634116", "0.5626631", "0.56246907", "0.56246907", "0.56246907", "0.56246907", "0.56246907", "0.5621488", "0.5621488", "0.5549839", "0.55467236", "0.5458848", "0.5457804", "0.5457804", "0.54571193", "0.545...
0.7161606
0
This method is used to postprocess the form data. By default, it returns the raw `form.data` dictionary.
def process_step(self, form): #print(form.data) #print(form.data) #print(self) institution = {} inst_list = [] if self.steps.current == '1': institution['institution'] = form.data['1-0-institution'] institution['date_from'] = form.data['1-0-date_from'] institution['date_to'] = form.data['1-0-date_to'] inst_list.append(institution) inst_keys = dict(form.data.lists()) #Create dictionary dynamically for the other institutions incase more than two institutions are entered if inst_keys.get('1-NaN-institution') and type(inst_keys.get('1-NaN-institution')) is list: inst_list2 = [] #Add institutions for i,insti in enumerate(inst_keys.get('1-NaN-institution')): inst_i = {} #print(i) date_from = inst_keys['1-NaN-date_from'][i] date_to = inst_keys['1-NaN-date_to'][i] course_duration = inst_keys['1-NaN-course_duration'][i] inst_i['institution'] = insti inst_i['date_from'] = date_from inst_i['date_to'] = date_to inst_list2.append(inst_i) #print(inst_list2) inst_list.extend(inst_list2) #Create dictionary dynamically for the other institutions incase more than two institutions are entered if inst_keys.get('1-NaN-institution') and type(inst_keys.get('1-NaN-institution')) is not list: inst_0 = {} inst_0['institution'] = form.data['1-NaN-institution'] inst_0['date_from'] = form.data['1-NaN-date_from'] inst_0['date_to'] = form.data['1-NaN-date_to'] inst_0['course_duration'] = form.data['1-NaN-course_duration'] #inst_0['achievements'] = '' inst_list.append(inst_0) #Add the entered information to a session object self.request.session['institution'] = inst_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_post_data(self):\n if not self._form_data:\n self._form_data = async_to_sync(self.request.form)()\n return self._form_data", "def get_form_data(self) -> dict:\n with logging.LogCall(__file__, \"get_form_data\", self.__class__):\n return self.serialize()", "def...
[ "0.79507184", "0.76475173", "0.738958", "0.73285085", "0.7258168", "0.6955681", "0.6792374", "0.65343493", "0.65144867", "0.6490603", "0.6459408", "0.6415936", "0.63819605", "0.63031137", "0.6298642", "0.6277921", "0.62373143", "0.6232082", "0.62192225", "0.60904664", "0.6056...
0.0
-1
maps must match for joining to work
def mapsMatch(m1,m2): same = True f1 = file(m1,'r').readlines() f2 = file(m2,'r').readlines() for i, row in enumerate(f1): row = row.strip().split() row2 = f2[i].strip().split() if row[0] <> row2[0]: same = False break return same
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_mappingtojoin(self):\n usersaddresses = sql.join(users, addresses, users.c.user_id == addresses.c.user_id)\n m = mapper(User, usersaddresses, primary_key=[users.c.user_id])\n q = create_session().query(m)\n l = q.all()\n self.assert_result(l, User, *user_result[0:2])", ...
[ "0.6610568", "0.63814634", "0.61559236", "0.6086679", "0.58406264", "0.5825378", "0.5764346", "0.57383496", "0.57372516", "0.5714955", "0.56882495", "0.5687337", "0.5668063", "0.56529844", "0.56490266", "0.56440884", "0.56300807", "0.56268895", "0.5618193", "0.56174135", "0.5...
0.54061365
31
geno codes must be appended row (marker) wise in subject (.ind file) order
def joinRows(r1,r2,outfname): outf = open(outfname,'w') f1 = file(r1,'r') f2 = file(r2,'r') for row1 in f1: if row1.strip() > '': row2 = f2.next() outf.write('%s%s\n' % (row1.strip(),row2.strip())) outf.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gencode_dic(gencode_file,gene_type_dic):\n gen_dic = {}\n for i in range(1,len(gencode_file)):\n words_gen = gencode_file[i].strip().split('\\t')\n chr_no = words_gen[2]\n trans_id = words_gen[1]\n cds_info = words_gen[13]\n cde_info = words_gen[14]\n gene_type =...
[ "0.5696215", "0.5680898", "0.5666527", "0.55099", "0.54899204", "0.5471716", "0.5390018", "0.53788966", "0.53691655", "0.5336136", "0.533134", "0.5327106", "0.5315752", "0.5311932", "0.52991354", "0.5275869", "0.52664924", "0.5263865", "0.5257725", "0.5240358", "0.52117866", ...
0.0
-1
individual data must be appended in same order as genos are being added
def joinInds(r1,r2,outfname): outf = open(outfname,'w') f1 = file(r1,'r') f2 = file(r2,'r') for row1 in f1: outf.write('%s\n' % (row1.strip())) for row1 in f2: outf.write('%s\n' % (row1.strip())) outf.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apilar(self,dato):\r\n\t\tself.elementos.append(dato)\r\n\t\tself.len += 1", "def concatenate_data():", "def append(self, batch: Batch):", "def append(self, data):\n if self._expand_mode:\n new_keys = set(data.keys()) - self.keys & self._keys\n self._expand(new_keys)\n ...
[ "0.6317219", "0.62716526", "0.60660607", "0.5965007", "0.5827419", "0.58169585", "0.58123124", "0.57943267", "0.57887346", "0.5719474", "0.5695228", "0.5687933", "0.5677406", "0.56611586", "0.5614816", "0.5614328", "0.55467606", "0.5543412", "0.55387115", "0.55334795", "0.553...
0.0
-1
provide both input file names up to extension and outfile path including name up to extension
def dojoin(ipath1,ipath2,opath): r1 = '%s.map' % ipath1 r2 = '%s.map' % ipath2 if not mapsMatch(r1,r2): print '### maps %s and %s do not match' % (r1,r2) sys.exit(1) outpath = '%s.map' % opath shutil.copyfile(r1,outpath) r1 = '%s.eigenstratgeno' % ipath1 r2 = '%s.eigenstratgeno' % ipath2 outpath = '%s.eigenstratgeno' % opath joinRows(r1,r2,outpath) outpath = '%s.ind' % opath r1 = '%s.ind' % ipath1 r2 = '%s.ind' % ipath2 joinInds(r1,r2,outpath)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ConvertFileName(cls,infile,band):\r\n try:\r\n import os\r\n except:\r\n raise ImportError(\"Can not find module os\")\r\n try:\r\n base = str.split(infile,\"_metadata.xml\")[0]\r\n print base\r\n ext=\"_band\"+str(band)+\".ntf\"\r\n ...
[ "0.66169494", "0.6289752", "0.62640893", "0.62422895", "0.6218007", "0.62115467", "0.61658305", "0.61075085", "0.6085868", "0.60749346", "0.60616755", "0.60532296", "0.60232437", "0.5990847", "0.5986506", "0.59429944", "0.5934633", "0.59320587", "0.5926914", "0.59200156", "0....
0.0
-1
Initialize your data structure here.
def __init__(self): self.queue = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_empty(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__...
[ "0.7765608", "0.7645274", "0.7645274", "0.7645274", "0.7645274", "0.7645274", "0.7645274", "0.7595176", "0.75853467", "0.7558298", "0.7530608", "0.7530608", "0.7530608", "0.7530608", "0.7530608", "0.74971247", "0.74971247", "0.7478105", "0.7477832", "0.7477832", "0.7477832", ...
0.0
-1
Push element x to the back of queue.
def enqueue(self, x): self.queue.append(x) return self.queue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def push(self, x): # time O(n)\n self.queue.append(x)\n for _ in range(len(self.queue)-1):\n self.queue.append(self.queue.popleft())", "def push(self, x: int) -> None:\n self.queue.append(x)\n for _ in range(len(self.queue)-1):\n self.queue.append(...
[ "0.8549472", "0.83887035", "0.8322536", "0.8316233", "0.8296685", "0.8249803", "0.82164633", "0.81946427", "0.81946427", "0.8059892", "0.79438967", "0.7901358", "0.7855948", "0.78556633", "0.78169864", "0.77913237", "0.77355105", "0.76975566", "0.767749", "0.76648164", "0.764...
0.7416999
31
Removes the element from in front of queue and returns that element.
def dequeue(self): return self.queue.pop(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop(self):\n self.queue.insert(len(self.queue), self.queue[0])\n self.queue.remove(self.queue[0])\n return self.queue.pop()", "def front(self):\n return self.queue[0] if not self.empty() else None", "def dequeue(self):\n if not self.front:\n raise AttributeErro...
[ "0.80894065", "0.7844948", "0.7837598", "0.7796653", "0.779448", "0.7678269", "0.76743865", "0.7673315", "0.7659757", "0.7606835", "0.75895596", "0.7558154", "0.7557646", "0.7546004", "0.7543471", "0.75360584", "0.7531314", "0.75289166", "0.752424", "0.7517075", "0.74940664",...
0.7609435
9
Get the front element.
def peek(self): return self.queue[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFront(self):\n\t\tfront = self.queue[self.front]\n\t\treturn front\n\t\tpass", "def getFront(self):\n if not self.isEmpty():\n return self._data[0]\n else:\n return -1", "def front(self):\n return self.queue[0] if not self.empty() else None", "def peek_front(...
[ "0.82009244", "0.79261875", "0.7852642", "0.7791182", "0.77886623", "0.75723046", "0.752337", "0.7510604", "0.7478953", "0.7379256", "0.7379256", "0.734284", "0.7323573", "0.7320296", "0.7291721", "0.72772557", "0.7269801", "0.72657627", "0.7253295", "0.72355044", "0.72213495...
0.66313344
60
Returns whether the queue is empty.
def empty(self): return self.queue == []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_empty(self):\n return len(self.the_queue) == 0", "def is_empty(self):\n return len(self.__queue) > 0", "def is_empty(self):\n return len(self.queue) == 0", "def is_empty(self):\n return len(self.queue) == 0", "def is_empty(self):\n return self.queue == []", "def ...
[ "0.9270249", "0.92506766", "0.923635", "0.923635", "0.9155843", "0.9098441", "0.9095006", "0.90922606", "0.90731245", "0.9053733", "0.89205354", "0.89137155", "0.8877591", "0.88436323", "0.88436323", "0.88038146", "0.8758203", "0.87577534", "0.87310195", "0.86462456", "0.8628...
0.8835949
15
Define the class balanced cross entropy loss to train the network
def class_balanced_cross_entropy_loss(output, label): labels = tf.cast(tf.greater(label, 0.5), tf.float32) num_labels_pos = tf.reduce_sum(labels) num_labels_neg = tf.reduce_sum(1.0 - labels) num_total = num_labels_pos + num_labels_neg output_gt_zero = tf.cast(tf.greater_equal(output, 0), tf.float32) loss_val = tf.multiply(output, (labels - output_gt_zero)) - tf.log( 1 + tf.exp(output - 2 * tf.multiply(output, output_gt_zero))) loss_pos = tf.reduce_sum(-tf.multiply(labels, loss_val)) loss_neg = tf.reduce_sum(-tf.multiply(1.0 - labels, loss_val)) final_loss = num_labels_neg / num_total * loss_pos + num_labels_pos / num_total * loss_neg return final_loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_loss(self):\n self.loss = nn.CrossEntropyLoss(weight = self.to_device(self.datasetManager.class_weights))\n #self.loss = nn.CrossEntropyLoss()", "def cross_entropy_loss():\n return nn.CrossEntropyLoss()", "def _classification_loss(self, logits, labels, num_classes):\n labels = tf....
[ "0.7614255", "0.72014856", "0.7150281", "0.7139225", "0.7041426", "0.69943374", "0.69878465", "0.6987561", "0.69841796", "0.69580585", "0.69393665", "0.6922004", "0.6916779", "0.69158214", "0.691163", "0.691163", "0.6885535", "0.67836154", "0.6734367", "0.6716514", "0.6716514...
0.6909369
16
Add a Pseudocode Operation at the actual active buffer.
def AddPseudoCode(self, pcode): self.buffers[self.buffergrade].append(pcode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_code(self, code):\n self.code += code", "def add_operation(self):\n arg1 = self.memory[self.memory[self._cursor + 1]]\n arg2 = self.memory[self.memory[self._cursor + 2]]\n arg3 = self.memory[self._cursor + 3]\n self.memory[arg3] = arg1 + arg2\n # print(f'Cursor: ...
[ "0.5870351", "0.5767791", "0.57653064", "0.5667283", "0.5466743", "0.54478973", "0.52387804", "0.52152646", "0.5208209", "0.5186463", "0.51665425", "0.51497793", "0.51468796", "0.5108849", "0.5103262", "0.50743014", "0.5067011", "0.50647295", "0.5024439", "0.50223887", "0.498...
0.7316713
0
Increment the BufferGrade and initialize a new empty buffer.
def IndentBuffer(self): self.buffergrade += 1 self.buffers[self.buffergrade] = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fillBuffer():\n buff[bufferCounter].next = dataIn", "def __init__(self, capacity):\n self.experiences = RingBuf(capacity)", "def init_buffer(self):\n \n self.shape.buf = [pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_det...
[ "0.58674264", "0.5741333", "0.5509345", "0.54187745", "0.537456", "0.53408396", "0.5315984", "0.5291879", "0.52720207", "0.5246771", "0.5140495", "0.51366794", "0.51127", "0.5102545", "0.5096625", "0.5072083", "0.5029141", "0.5018385", "0.4994677", "0.4994677", "0.4994677", ...
0.730155
0
Decrement the BufferGrade and pop out the buffer active before.
def DeIndentBuffer(self): if self.buffergrade == 0: raise Exception("You can't deindent more.") self.buffergrade -= 1 tmp = self.buffers[self.buffergrade + 1] del self.buffers[self.buffergrade + 1] return tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrease(self):\n self.score -= self.score", "def decrement(self):\n self.data[self.pointer] -= 1\n self.data[self.pointer] %= 256", "def IndentBuffer(self):\n self.buffergrade += 1\n self.buffers[self.buffergrade] = []", "def RemoveGrade(self, grade):\n if not s...
[ "0.62998307", "0.6187696", "0.6057455", "0.5972686", "0.58809793", "0.578995", "0.5724787", "0.57013", "0.5689314", "0.5660951", "0.5610543", "0.5608327", "0.55659264", "0.55524766", "0.5540197", "0.55076224", "0.5456718", "0.5449712", "0.5409593", "0.54038453", "0.53746724",...
0.71859
0
Will return the shared buffer of all the self subclasses.
def GetMainBuffer(self): tmp = self.buffers[0] self.buffers[0] = [] return tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getAll(self):\n return self.dataBuffer", "def read_shared(self):\n return self._read_all()", "def read_shared(self):\n return self._read_all()", "def common(self):\n return self._common", "def get_bases(self):\n # TODO: subclassing\n return (self.py_class,)", ...
[ "0.58079183", "0.5729144", "0.5729144", "0.5667983", "0.5634361", "0.5608551", "0.5481564", "0.54492664", "0.5438543", "0.5438543", "0.5438543", "0.5422512", "0.54199225", "0.5405491", "0.539883", "0.53901875", "0.53715754", "0.53574383", "0.53372616", "0.5331279", "0.5319714...
0.5415913
13
Get a reference to the actual buffer activated.
def RefBuffer(self): return self.buffers[self.buffergrade]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_buffer(self):\n return self.layout.current_buffer", "def getBuffer(self):\n return self.buffer", "def buffer_backend(cls, *args, **kwargs):\n return cls._buffer_context", "def current_buffer_app(self):\n return self.session.current_buffer", "def buffer(self):\n ...
[ "0.70962286", "0.69201726", "0.67237633", "0.6671908", "0.6613722", "0.62484485", "0.61964005", "0.61714095", "0.61454296", "0.61443", "0.6099056", "0.60289884", "0.5986562", "0.5955822", "0.5955822", "0.5955822", "0.59430313", "0.58911574", "0.5882561", "0.5830065", "0.58167...
0.7871913
0
Track a code indentation index for successive utilization.
def TrackIfIndex(self, index): self.indentindex.append(index)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def increase_code_indent(self) -> None:\n self._parent_node.increase_code_indent()", "def _increaseindentation(self):\n self._indentlist.append(self._curindent)\n if not self._equalsigns[-1]:\n self._curindent = self._curindent + self._indent", "def getIndentationLevel(self, cod...
[ "0.61810654", "0.60863245", "0.5817759", "0.5709027", "0.5614261", "0.5614261", "0.5520226", "0.55129415", "0.54851073", "0.54282254", "0.53754544", "0.53624535", "0.532262", "0.52931166", "0.5262821", "0.52572817", "0.5244937", "0.52138895", "0.5192445", "0.5161051", "0.5153...
0.70099694
0
Get the last code indentation index tracked as reference.
def GetIfIndex(self): return self.indentindex[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last_index(self) -> int:\n return self._last_index", "def indentation_level(self):\n return self._indentation_levels[-1]", "def last_sequence_ind(self,):\n return self.last_sequence_ind_", "def _get_last_code_line():\n return max(_code_lines) + 2", "def get_last_index(self):\n ...
[ "0.6989419", "0.68731844", "0.67984694", "0.65894204", "0.64901143", "0.64773256", "0.63904345", "0.6377186", "0.6306196", "0.6305273", "0.62961644", "0.62961644", "0.62961644", "0.62961644", "0.62961644", "0.62961644", "0.6286268", "0.62604773", "0.6259666", "0.61679256", "0...
0.66052157
3
Pop (get and remove) the last code indentation index tracked.
def PopIfIndex(self): return self.indentindex.pop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _decreaseindentation(self):\n self._curindent = self._indentlist.pop()", "def pop(self) -> int:\n return self.stack.pop()", "def pop(self) -> int:\n return self._stack.pop()", "def pop(self) -> int:\n for i in range(len(self.stack) - 1):\n self.stack.append(self.sta...
[ "0.687736", "0.662047", "0.65572536", "0.64603764", "0.6362599", "0.6348772", "0.63447845", "0.630998", "0.6291612", "0.6259269", "0.624546", "0.61962366", "0.6184571", "0.61161476", "0.611117", "0.60742784", "0.6037302", "0.60337037", "0.60090023", "0.59744895", "0.5953556",...
0.802693
0
Initialization of protected Operation Object attribute for subclasses.
def __init__(self): self._OPERATION = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n\n self.operations = {}", "def _init(self):\n raise NotImplementedError", "def __init__(self):\r\n self.operation_map = {}", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def __init__(self):\n ...
[ "0.69042325", "0.6817324", "0.66968143", "0.65770996", "0.65770996", "0.6513839", "0.6513839", "0.6513839", "0.6513839", "0.64662194", "0.6441285", "0.642362", "0.6404217", "0.6404217", "0.6404217", "0.6404217", "0.6404217", "0.6404217", "0.6404217", "0.6404217", "0.6404217",...
0.7562495
0
Get the Operation Object generated by the command.
def getOp(self): return self._OPERATION
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_operation_obect(self, method):\n pass", "def get_operation(\n self,\n ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization a...
[ "0.6874343", "0.67712283", "0.67712283", "0.67712283", "0.67545724", "0.6746998", "0.6746998", "0.66104776", "0.65664905", "0.6563017", "0.6563017", "0.6541429", "0.6439676", "0.64120996", "0.64114374", "0.640891", "0.64030427", "0.6372547", "0.6367869", "0.6367869", "0.63323...
0.71553755
0
Creates a temporary image for manipulation, and handles optional RGB conversion.
def _create_tmp_image(self, content): content.seek(0) image = Image.open(content) if self.force_rgb and image.mode not in ('L', 'RGB', 'RGBA'): image = image.convert('RGB') return image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def temporary_unsupported_image(self):\n image = Image.new('RGB', (1, 1))\n tmp_file = tempfile.NamedTemporaryFile(suffix='.ppm')\n image.save(tmp_file, 'ppm')\n # important because after save(),\n # the fp is already at the end of the file\n tmp_file.seek(0) # retrieves ...
[ "0.67979735", "0.6447679", "0.6425846", "0.6331001", "0.6304116", "0.6232688", "0.6175286", "0.60538095", "0.6020988", "0.6015573", "0.59706867", "0.59613866", "0.5947222", "0.5861616", "0.5856823", "0.5853021", "0.5808916", "0.5805031", "0.57975703", "0.5789881", "0.57089484...
0.70588124
0
Returns image data as a ``ContentFile``.
def _create_content_file(self, content): io = BytesIO() content.save(io, self._normalize_format(), quality=self.quality) return ContentFile(io.getvalue())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getImgContentFile(img):\n format, imgstr = img.split(';base64,')\n ext = format.split('/')[-1]\n file = ContentFile(base64.b64decode(imgstr), name='temp.' + ext)\n return file", "def contents(self):\n if not self._contents:\n if self._path:\n # Read file into memo...
[ "0.74447596", "0.6932529", "0.67421204", "0.67015547", "0.65835774", "0.6578695", "0.6555087", "0.6495914", "0.63615793", "0.62989324", "0.6253196", "0.6239247", "0.6239247", "0.62345475", "0.61542207", "0.61115354", "0.6059986", "0.6055182", "0.6054496", "0.60452163", "0.601...
0.63306314
9
Resizes a valid image, and returns as a Django ``ContentFile``.
def generate(self, content): tmp = self._create_tmp_image(content) rendered = self._render(tmp) return self._create_content_file(rendered)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resize_img(self,scale=1):\n reduced = self.image.reduce((scale,scale))\n reduced.save(\"../edited/{}\".format(self.image.filename))\n\n reduced = Image.open(\"../edited/{}\".format(self.image.filename))\n return reduced", "def resize_image(first_image, width, height):\n resizin...
[ "0.6446886", "0.63090426", "0.6298052", "0.61945766", "0.60929877", "0.6076873", "0.60629976", "0.6004527", "0.5964197", "0.59304476", "0.59220564", "0.5894676", "0.58834445", "0.5864532", "0.58378077", "0.58318716", "0.5828034", "0.58193", "0.57947105", "0.5785676", "0.57840...
0.0
-1
Renders the image. Override this method when creating a custom renderer.
def _render(self, image): raise NotImplementedError('Override this method to render images!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render(self) -> None:\n if self.native_rendering:\n self._render()\n else:\n self.renderer.render_image(self.get_rendered_image())", "def _render(self):\n self.dirty = False\n self.image = self.font.render(self._text, self.aa, self.color_fg)\n self.rec...
[ "0.83629304", "0.7514363", "0.738157", "0.71521425", "0.7143713", "0.7124317", "0.7027804", "0.69824153", "0.697108", "0.69224477", "0.69172174", "0.69109416", "0.68451834", "0.676605", "0.6748223", "0.66155213", "0.660075", "0.660075", "0.65852135", "0.64857286", "0.64756054...
0.86215574
0
Version management for migrations.
def __eq__(self, other): return ( self.bleed == other.bleed and self.width == other.width and self.height == other.height )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migration():", "def migrate(cr, version):\n pass", "def version(self):\r\n print migration.db_version()", "def post_migrations(self):", "def _run_migrations(self, current_migration_version: int):\n logger.debug(\"Checking for necessary database migrations...\")\n\n while current...
[ "0.7271674", "0.6912402", "0.6781953", "0.66197056", "0.6540998", "0.64763844", "0.63564444", "0.6351916", "0.6346999", "0.627028", "0.6214616", "0.61989695", "0.61721605", "0.61721605", "0.6119614", "0.6112119", "0.60870755", "0.6083832", "0.6082907", "0.6082181", "0.5973878...
0.0
-1
Version management for migrations.
def __eq__(self, other): return ( self.constrain == other.constrain and self.width == other.width and self.height == other.height and self.upscale == other.upscale )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migration():", "def migrate(cr, version):\n pass", "def version(self):\r\n print migration.db_version()", "def post_migrations(self):", "def _run_migrations(self, current_migration_version: int):\n logger.debug(\"Checking for necessary database migrations...\")\n\n while current...
[ "0.7271674", "0.6912402", "0.6781953", "0.66197056", "0.6540998", "0.64763844", "0.63564444", "0.6351916", "0.6346999", "0.627028", "0.6214616", "0.61989695", "0.61721605", "0.61721605", "0.6119614", "0.6112119", "0.60870755", "0.6083832", "0.6082907", "0.6082181", "0.5973878...
0.0
-1
Version management for migrations.
def __eq__(self, other): return ( self.bg_color == other.bg_color and self.width == other.width and self.height == other.height )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migration():", "def migrate(cr, version):\n pass", "def version(self):\r\n print migration.db_version()", "def post_migrations(self):", "def _run_migrations(self, current_migration_version: int):\n logger.debug(\"Checking for necessary database migrations...\")\n\n while current...
[ "0.7271674", "0.6912402", "0.6781953", "0.66197056", "0.6540998", "0.64763844", "0.63564444", "0.6351916", "0.6346999", "0.627028", "0.6214616", "0.61989695", "0.61721605", "0.61721605", "0.6119614", "0.6112119", "0.60870755", "0.6083832", "0.6082907", "0.6082181", "0.5973878...
0.0
-1
Normalize, pad and batch the input images.
def preprocess_image(self, batched_inputs): images = [x.to(self.device) for x in batched_inputs] norms = [self.normalizer(x) for x in images] size = (norms[0].shape[1],norms[0].shape[2]) images = ImageList.from_tensors(norms, self.backbone.size_divisibility) return images, size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_images(data, blend_cat, Args):\n im = data['X_train']['blend_image']\n std = np.std(im)\n mean = np.mean(im)\n data['X_train']['blend_image'] = (im - mean) / std\n data['X_val']['blend_image'] = (data['X_val']['blend_image'] - mean) / std\n data['X_train'] = normalize_other_inputs(d...
[ "0.7097169", "0.66408646", "0.65812963", "0.6465365", "0.6464898", "0.6406874", "0.63755596", "0.63661265", "0.6324502", "0.6318984", "0.63017005", "0.6297598", "0.6261444", "0.62284863", "0.6226007", "0.6213169", "0.6194187", "0.6189247", "0.61866045", "0.6185736", "0.618346...
0.67390656
1
Read a key file, if the key file does not exist create one
def keys(self) -> None: path = Path('./config/key') global key # If the file path does not exist, create one if not path.exists(): os.makedirs(path) while True: # read key.key file try: file = open(path / 'key.key', 'rb') key = file.read() file.close # when key.key file does not exist. Create one except FileNotFoundError: key = Fernet.generate_key() file = open(path / 'key.key', 'wb') file.write(key) file.close() continue break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_key(key_name):\n if not p.exists(key_name):\n write_key(key_name)\n\n return open(key_name, \"rb\").read()", "def read_key(self, keyfile_name):\n\n with open(keyfile_name, 'rb') as f:\n self.key = f.read()\n self.cryptor = Fernet(self.key)", "def readKey(self,...
[ "0.75079066", "0.70167714", "0.6591259", "0.6573006", "0.65389", "0.6534434", "0.6523295", "0.6516931", "0.6431083", "0.64273334", "0.64232206", "0.6402406", "0.6346148", "0.63321847", "0.62482345", "0.62318903", "0.6229613", "0.61933273", "0.6170967", "0.6168514", "0.616238"...
0.60398936
28
Ensure that apigateway v1 and apigateway v2 actions are both present in the ses namespace
def test_services_with_multiple_pages_apigateway(self): # API Gateway Management V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html self.assertTrue("apigateway:AddCertificateToDomain" in self.all_actions) self.assertTrue("apigateway:RemoveCertificateFromDomain" in self.all_actions) self.assertTrue("apigateway:SetWebACL" in self.all_actions) # API Gateway Management V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html # API Gateway V2 doesn't have any unique actions in but it does have some unique resource types. Let's make sure those resource types are in the IAM Definition. # Resource types unique to API Gateway V2: resource_types = get_arn_types_for_service("apigateway") resource_types = list(resource_types.keys()) self.assertTrue("AccessLogSettings" in resource_types) # Resource types unique to API Gateway V1: self.assertTrue("RestApi" in resource_types)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\...
[ "0.59183055", "0.57958144", "0.5341116", "0.51459986", "0.5056637", "0.5025083", "0.49718618", "0.49709255", "0.49639255", "0.4939532", "0.48318732", "0.47402886", "0.47391623", "0.47184974", "0.4717983", "0.47155645", "0.47113252", "0.4703302", "0.46926475", "0.46769983", "0...
0.5989848
0
Ensure that awsmarketplace actions from all the different awsmarketplace SAR pages are present in the IAM definition.
def test_services_with_multiple_pages_aws_marketplace(self): # Overlap: AWS Marketplace, Marketplace Catalog, and AWS Marketplace Entitlement service, AWS Marketplace Image Building Service, AWS Marketplace Metering Service, AWS Marketplace Private Marketplace, and AWS Marketplace Procurement Systems # AWS Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplace.html self.assertTrue("aws-marketplace:AcceptAgreementApprovalRequest" in self.all_actions) # AWS Marketplace Catalog: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacecatalog.html self.assertTrue("aws-marketplace:CancelChangeSet" in self.all_actions) # AWS Marketplace Entitlement Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceentitlementservice.html self.assertTrue("aws-marketplace:GetEntitlements" in self.all_actions) # AWS Marketplace Image Building Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceimagebuildingservice.html self.assertTrue("aws-marketplace:DescribeBuilds" in self.all_actions) # AWS Marketplace Metering Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacemeteringservice.html self.assertTrue("aws-marketplace:BatchMeterUsage" in self.all_actions) # AWS Marketplace Private Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprivatemarketplace.html self.assertTrue("aws-marketplace:AssociateProductsWithPrivateMarketplace" in self.all_actions) # AWS Marketplace Procurement Systems: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprocurementsystemsintegration.html self.assertTrue("aws-marketplace:DescribeProcurementSystemConfiguration" in self.all_actions) results = get_actions_for_service("aws-marketplace") actions = [ "aws-marketplace:AcceptAgreementApprovalRequest", 
"aws-marketplace:BatchMeterUsage", "aws-marketplace:CancelAgreementRequest", "aws-marketplace:CancelChangeSet", "aws-marketplace:CompleteTask", "aws-marketplace:DescribeAgreement", "aws-marketplace:DescribeBuilds", "aws-marketplace:DescribeChangeSet", "aws-marketplace:DescribeEntity", "aws-marketplace:DescribeProcurementSystemConfiguration", "aws-marketplace:DescribeTask", "aws-marketplace:GetAgreementApprovalRequest", "aws-marketplace:GetAgreementRequest", "aws-marketplace:GetAgreementTerms", "aws-marketplace:GetEntitlements", "aws-marketplace:ListAgreementApprovalRequests", "aws-marketplace:ListAgreementRequests", "aws-marketplace:ListBuilds", "aws-marketplace:ListChangeSets", "aws-marketplace:ListEntities", "aws-marketplace:ListTasks", "aws-marketplace:MeterUsage", "aws-marketplace:PutProcurementSystemConfiguration", "aws-marketplace:RegisterUsage", "aws-marketplace:RejectAgreementApprovalRequest", "aws-marketplace:ResolveCustomer", "aws-marketplace:SearchAgreements", "aws-marketplace:StartBuild", "aws-marketplace:StartChangeSet", "aws-marketplace:Subscribe", "aws-marketplace:Unsubscribe", "aws-marketplace:UpdateAgreementApprovalRequest", "aws-marketplace:UpdateTask", "aws-marketplace:ViewSubscriptions", ] for action in actions: self.assertTrue(action in results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_actions_with_arn_type_and_access_level_case_3(self):\n desired_output = [\n 's3:PutAccountPublicAccessBlock',\n 's3:PutAccessPointPublicAccessBlock'\n ]\n output = get_actions_with_arn_type_and_access_level(\n # \"ram\", \"resource-share\", \"Write...
[ "0.58062184", "0.57554114", "0.5695702", "0.5673701", "0.5447731", "0.5414008", "0.53181934", "0.5090683", "0.5057209", "0.49688128", "0.49623215", "0.49491638", "0.49443293", "0.49354288", "0.4900723", "0.48934472", "0.48312107", "0.48288488", "0.47950754", "0.4768656", "0.4...
0.66148233
0
Ensure that greengrass v1 and greengrass v2 actions are both present in the greengrass namespace
def test_services_with_multiple_pages_greengrass(self): # Greengrass V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrass.html self.assertTrue("greengrass:CreateResourceDefinition" in self.all_actions) # Greengrass V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrassv2.html self.assertTrue("greengrass:CreateComponentVersion" in self.all_actions) results = get_actions_for_service("greengrass") actions = [ "greengrass:AssociateRoleToGroup", "greengrass:CreateConnectorDefinition", "greengrass:CreateConnectorDefinitionVersion", "greengrass:CreateCoreDefinition", "greengrass:CreateCoreDefinitionVersion", "greengrass:CreateDeviceDefinition", "greengrass:CreateDeviceDefinitionVersion", "greengrass:CreateFunctionDefinition", "greengrass:CreateFunctionDefinitionVersion", "greengrass:CreateGroup", "greengrass:CreateGroupCertificateAuthority", "greengrass:CreateGroupVersion", "greengrass:CreateLoggerDefinition", "greengrass:CreateLoggerDefinitionVersion", "greengrass:CreateResourceDefinition", "greengrass:CreateResourceDefinitionVersion", "greengrass:CreateSoftwareUpdateJob", "greengrass:CreateSubscriptionDefinition", "greengrass:CreateSubscriptionDefinitionVersion", "greengrass:DeleteConnectorDefinition", "greengrass:DeleteCoreDefinition", "greengrass:DeleteDeviceDefinition", "greengrass:DeleteFunctionDefinition", "greengrass:DeleteGroup", "greengrass:DeleteLoggerDefinition", "greengrass:DeleteResourceDefinition", "greengrass:DeleteSubscriptionDefinition", "greengrass:DisassociateRoleFromGroup", "greengrass:Discover", "greengrass:GetAssociatedRole", "greengrass:GetBulkDeploymentStatus", "greengrass:GetConnectorDefinition", "greengrass:GetConnectorDefinitionVersion", "greengrass:GetCoreDefinition", "greengrass:GetCoreDefinitionVersion", "greengrass:GetDeploymentStatus", "greengrass:GetDeviceDefinition", "greengrass:GetDeviceDefinitionVersion", "greengrass:GetFunctionDefinition", 
"greengrass:GetFunctionDefinitionVersion", "greengrass:GetGroup", "greengrass:GetGroupCertificateAuthority", "greengrass:GetGroupCertificateConfiguration", "greengrass:GetGroupVersion", "greengrass:GetLoggerDefinition", "greengrass:GetLoggerDefinitionVersion", "greengrass:GetResourceDefinition", "greengrass:GetResourceDefinitionVersion", "greengrass:GetSubscriptionDefinition", "greengrass:GetSubscriptionDefinitionVersion", "greengrass:GetThingRuntimeConfiguration", "greengrass:ListBulkDeploymentDetailedReports", "greengrass:ListBulkDeployments", "greengrass:ListConnectorDefinitionVersions", "greengrass:ListConnectorDefinitions", "greengrass:ListCoreDefinitionVersions", "greengrass:ListCoreDefinitions", "greengrass:ListDeviceDefinitionVersions", "greengrass:ListDeviceDefinitions", "greengrass:ListFunctionDefinitionVersions", "greengrass:ListFunctionDefinitions", "greengrass:ListGroupCertificateAuthorities", "greengrass:ListGroupVersions", "greengrass:ListGroups", "greengrass:ListLoggerDefinitionVersions", "greengrass:ListLoggerDefinitions", "greengrass:ListResourceDefinitionVersions", "greengrass:ListResourceDefinitions", "greengrass:ListSubscriptionDefinitionVersions", "greengrass:ListSubscriptionDefinitions", "greengrass:ResetDeployments", "greengrass:StartBulkDeployment", "greengrass:StopBulkDeployment", "greengrass:UpdateConnectorDefinition", "greengrass:UpdateCoreDefinition", "greengrass:UpdateDeviceDefinition", "greengrass:UpdateFunctionDefinition", "greengrass:UpdateGroup", "greengrass:UpdateGroupCertificateConfiguration", "greengrass:UpdateLoggerDefinition", "greengrass:UpdateResourceDefinition", "greengrass:UpdateSubscriptionDefinition", "greengrass:UpdateThingRuntimeConfiguration" ] for action in actions: self.assertTrue(action in results) # if action not in results: # print(action)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\...
[ "0.62013024", "0.5185163", "0.51849663", "0.50805354", "0.50188154", "0.49678308", "0.49544063", "0.49272656", "0.49225903", "0.49052584", "0.4849131", "0.48472935", "0.47611517", "0.4737386", "0.47336262", "0.46869755", "0.46836528", "0.4678428", "0.46226096", "0.46140948", ...
0.6528508
0
Ensure that elb v1 and elb v2 actions are both present in the elasticloadbalancing namespace
def test_services_with_multiple_pages_elb(self): results = get_actions_for_service("elasticloadbalancing") actions = [ "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", "elasticloadbalancing:AttachLoadBalancerToSubnets", "elasticloadbalancing:ConfigureHealthCheck", "elasticloadbalancing:CreateAppCookieStickinessPolicy", "elasticloadbalancing:CreateLBCookieStickinessPolicy", "elasticloadbalancing:CreateLoadBalancerListeners", "elasticloadbalancing:CreateLoadBalancerPolicy", "elasticloadbalancing:DeleteLoadBalancerListeners", "elasticloadbalancing:DeleteLoadBalancerPolicy", "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", "elasticloadbalancing:DescribeInstanceHealth", "elasticloadbalancing:DescribeLoadBalancerPolicies", "elasticloadbalancing:DescribeLoadBalancerPolicyTypes", "elasticloadbalancing:DetachLoadBalancerFromSubnets", "elasticloadbalancing:DisableAvailabilityZonesForLoadBalancer", "elasticloadbalancing:EnableAvailabilityZonesForLoadBalancer", "elasticloadbalancing:RegisterInstancesWithLoadBalancer", "elasticloadbalancing:SetLoadBalancerListenerSSLCertificate", "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", ] for action in actions: self.assertTrue(action in results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\...
[ "0.7524008", "0.5502818", "0.53605366", "0.5155062", "0.50617534", "0.5045359", "0.49514994", "0.4864293", "0.48232004", "0.47804296", "0.47697622", "0.4720676", "0.47133136", "0.4713231", "0.47080573", "0.4707765", "0.47045755", "0.46887946", "0.46873748", "0.4683964", "0.46...
0.5852793
1
Ensure that lex v1 and lex v2 actions are both present in the lex namespace
def test_services_with_multiple_pages_lex(self): # Lex V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonlex.html self.assertTrue("lex:DeleteUtterances" in self.all_actions) # Lex V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonlexv2.html self.assertTrue("lex:ListBotLocales" in self.all_actions) results = get_actions_for_service("lex") actions = [ "lex:CreateIntentVersion", "lex:CreateSlotTypeVersion", "lex:DeleteBotChannelAssociation", "lex:DeleteIntentVersion", "lex:DeleteSlotTypeVersion", "lex:GetBot", "lex:GetBotAlias", "lex:GetBotAliases", "lex:GetBotChannelAssociation", "lex:GetBotChannelAssociations", "lex:GetBotVersions", "lex:GetBots", "lex:GetBuiltinIntent", "lex:GetBuiltinIntents", "lex:GetBuiltinSlotTypes", "lex:GetExport", "lex:GetImport", "lex:GetIntent", "lex:GetIntentVersions", "lex:GetIntents", "lex:GetMigration", "lex:GetMigrations", "lex:GetSlotType", "lex:GetSlotTypeVersions", "lex:GetSlotTypes", "lex:GetUtterancesView", "lex:PostContent", "lex:PostText", "lex:PutBot", "lex:PutBotAlias", "lex:PutIntent", "lex:PutSlotType", "lex:StartMigration", ] for action in actions: self.assertTrue(action in results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_user_grammar_actions():\n grammar = \"\"\"\n S: A B C;\n @nonterm_action\n C: A B;\n A: \"a\";\n @term_action\n B: \"b\";\n \"\"\"\n\n called = [False, False]\n\n def nonterm_action(_, __):\n called[0] = True\n\n def term_action(_, __):\n called[1] = True\n\n...
[ "0.5523519", "0.54551274", "0.52533966", "0.4947026", "0.49150628", "0.4877916", "0.4869475", "0.4861803", "0.47912464", "0.47661808", "0.47431847", "0.47416365", "0.4730279", "0.47081596", "0.46722156", "0.46685877", "0.46628264", "0.46338037", "0.463026", "0.46180958", "0.4...
0.6062302
0
Ensure that Kinesis Analytics V1 actions are both present in the ses namespace
def test_services_with_multiple_pages_kinesis_analytics(self): # Kinesis Analytics V1 results = get_actions_for_service("kinesisanalytics") actions = [ "kinesisanalytics:GetApplicationState", # Only in v1, not v2 "kinesisanalytics:ListApplications", # In both ] for action in actions: self.assertTrue(action in results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \...
[ "0.57531", "0.5480236", "0.5119307", "0.5114083", "0.50098765", "0.49987218", "0.4811319", "0.47456703", "0.4734747", "0.4714206", "0.4702901", "0.4696541", "0.46635452", "0.46622923", "0.4657708", "0.46406722", "0.46395832", "0.45904428", "0.45893326", "0.45853838", "0.45726...
0.55055636
1
Ensure that ses v1 and ses v2 actions are both present in the ses namespace
def test_services_with_multiple_pages_ses(self): # SES V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html self.assertTrue("ses:PutIdentityPolicy" in self.all_actions) # SES V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonsimpleemailservicev2.html self.assertTrue("ses:ListImportJobs" in self.all_actions) results = get_actions_for_service("ses") actions = [ "ses:CloneReceiptRuleSet", "ses:CreateConfigurationSetTrackingOptions", "ses:CreateReceiptFilter", "ses:CreateReceiptRule", "ses:CreateReceiptRuleSet", "ses:CreateTemplate", "ses:DeleteConfigurationSetTrackingOptions", "ses:DeleteIdentity", "ses:DeleteIdentityPolicy", "ses:DeleteReceiptFilter", "ses:DeleteReceiptRule", "ses:DeleteReceiptRuleSet", "ses:DeleteTemplate", "ses:DeleteVerifiedEmailAddress", "ses:DescribeActiveReceiptRuleSet", "ses:DescribeConfigurationSet", "ses:DescribeReceiptRule", "ses:DescribeReceiptRuleSet", "ses:GetAccountSendingEnabled", "ses:GetIdentityDkimAttributes", "ses:GetIdentityMailFromDomainAttributes", "ses:GetIdentityNotificationAttributes", "ses:GetIdentityPolicies", "ses:GetIdentityVerificationAttributes", "ses:GetSendQuota", "ses:GetSendStatistics", "ses:GetTemplate", "ses:ListIdentities", "ses:ListIdentityPolicies", "ses:ListReceiptFilters", "ses:ListReceiptRuleSets", "ses:ListTemplates", "ses:ListVerifiedEmailAddresses", "ses:PutIdentityPolicy", "ses:ReorderReceiptRuleSet", "ses:SendBounce", "ses:SendBulkTemplatedEmail", "ses:SendRawEmail", "ses:SendTemplatedEmail", "ses:SetActiveReceiptRuleSet", "ses:SetIdentityDkimEnabled", "ses:SetIdentityFeedbackForwardingEnabled", "ses:SetIdentityHeadersInNotificationsEnabled", "ses:SetIdentityMailFromDomain", "ses:SetIdentityNotificationTopic", "ses:SetReceiptRulePosition", "ses:TestRenderTemplate", "ses:UpdateAccountSendingEnabled", "ses:UpdateConfigurationSetReputationMetricsEnabled", "ses:UpdateConfigurationSetSendingEnabled", 
"ses:UpdateConfigurationSetTrackingOptions", "ses:UpdateReceiptRule", "ses:UpdateTemplate", "ses:VerifyDomainDkim", "ses:VerifyDomainIdentity", "ses:VerifyEmailAddress", "ses:VerifyEmailIdentity", ] for action in actions: self.assertTrue(action in results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\...
[ "0.5713234", "0.550883", "0.5373594", "0.53643465", "0.5290291", "0.5130985", "0.5121447", "0.48156258", "0.479505", "0.479243", "0.47852328", "0.47827813", "0.47478974", "0.4735454", "0.4725077", "0.47070414", "0.46997523", "0.4678125", "0.46708792", "0.4640849", "0.46401328...
0.62560695
0
Other missing actions from GH 393
def test_other_iam_data_fixes_in_GH_393(self): # Cassandra: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonkeyspacesforapachecassandra.html results = get_actions_for_service("cassandra") self.assertTrue("cassandra:Restore" in results) # Comprehend Medical: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncomprehendmedical.html results = get_actions_for_service("comprehendmedical") # print(results) actions = [ "comprehendmedical:DescribeEntitiesDetectionV2Job", "comprehendmedical:DescribeICD10CMInferenceJob", "comprehendmedical:DescribePHIDetectionJob", "comprehendmedical:DescribeRxNormInferenceJob", # "comprehendmedical:DescribeSNOMEDCTInferenceJob", # Not in SAR "comprehendmedical:DetectEntitiesV2", "comprehendmedical:InferICD10CM", "comprehendmedical:InferRxNorm", # "comprehendmedical:InferSNOMEDCT", # Not in SAR "comprehendmedical:ListEntitiesDetectionV2Jobs", "comprehendmedical:ListICD10CMInferenceJobs", "comprehendmedical:ListPHIDetectionJobs", "comprehendmedical:ListRxNormInferenceJobs", # "comprehendmedical:ListSNOMEDCTInferenceJobs", # Not in SAR "comprehendmedical:StartEntitiesDetectionV2Job", "comprehendmedical:StartICD10CMInferenceJob", "comprehendmedical:StartPHIDetectionJob", "comprehendmedical:StartRxNormInferenceJob", "comprehendmedical:StopEntitiesDetectionV2Job", "comprehendmedical:StopICD10CMInferenceJob", ] for action in actions: # if action not in results: # print(action) self.assertTrue(action in results) # Compute Optimizer results = get_actions_for_service("compute-optimizer") actions = [ "compute-optimizer:DeleteRecommendationPreferences", "compute-optimizer:ExportEBSVolumeRecommendations", "compute-optimizer:ExportLambdaFunctionRecommendations", "compute-optimizer:GetEffectiveRecommendationPreferences", "compute-optimizer:GetEnrollmentStatusesForOrganization", "compute-optimizer:GetLambdaFunctionRecommendations", "compute-optimizer:GetRecommendationPreferences", 
"compute-optimizer:PutRecommendationPreferences", ] for action in actions: self.assertTrue(action in results) # DataSync results = get_actions_for_service("datasync") actions = [ "datasync:UpdateLocationNfs", "datasync:UpdateLocationObjectStorage", "datasync:UpdateLocationSmb", "datasync:UpdateTaskExecution" ] for action in actions: self.assertTrue(action in results) # Account Management results = get_actions_for_service("account") actions = [ "account:DeleteAlternateContact", "account:GetAlternateContact", "account:PutAlternateContact", ] for action in actions: self.assertTrue(action in results) # AWS IAM Access Analyzer results = get_actions_for_service("access-analyzer") actions = [ "access-analyzer:CancelPolicyGeneration", "access-analyzer:CreateAccessPreview", "access-analyzer:GetAccessPreview", "access-analyzer:GetGeneratedPolicy", "access-analyzer:ListAccessPreviewFindings", "access-analyzer:ListAccessPreviews", "access-analyzer:ListPolicyGenerations", "access-analyzer:StartPolicyGeneration", "access-analyzer:ValidatePolicy", ] for action in actions: self.assertTrue(action in results) # Elemental Activations results = get_actions_for_service("elemental-activations") actions = [ "elemental-activations:CompleteAccountRegistration", "elemental-activations:StartAccountRegistration" ] for action in actions: self.assertTrue(action in results) # OpenSearch results = get_actions_for_service("es") actions = [ "es:DescribeDomainChangeProgress", ] for action in actions: self.assertTrue(action in results) # Location results = get_actions_for_service("geo") actions = [ "geo:CalculateRouteMatrix", ] for action in actions: self.assertTrue(action in results) # Amazon Managed Grafana results = get_actions_for_service("grafana") actions = [ "grafana:DescribeWorkspaceAuthentication", "grafana:UpdateWorkspaceAuthentication", ] for action in actions: self.assertTrue(action in results) # EC2 Image Builder results = get_actions_for_service("imagebuilder") actions = [ 
"imagebuilder:ImportVmImage", ] for action in actions: self.assertTrue(action in results) # Timestream results = get_actions_for_service("timestream") actions = [ "timestream:CreateScheduledQuery", "timestream:DeleteScheduledQuery", "timestream:DescribeScheduledQuery", "timestream:ExecuteScheduledQuery", "timestream:ListScheduledQueries", "timestream:UpdateScheduledQuery", ] for action in actions: self.assertTrue(action in results) # AWS Transfer Family results = get_actions_for_service("transfer") actions = [ "transfer:CreateAccess", "transfer:CreateWorkflow", "transfer:DeleteAccess", "transfer:DeleteWorkflow", "transfer:DescribeAccess", "transfer:DescribeExecution", "transfer:DescribeWorkflow", "transfer:ListAccesses", "transfer:ListExecutions", "transfer:ListWorkflows", "transfer:SendWorkflowStepState", "transfer:UpdateAccess", ] for action in actions: self.assertTrue(action in results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def actions() -> None:\n pass", "def actions():\n pass", "def test_10_unsupported_actions(self):\n\n def __count_pulled_packages(pth):\n self.pkgrepo(\"list -F tsv -H -s {0}\".format(pth))\n return len(self.output.splitlines())\n\n ...
[ "0.67267066", "0.65667135", "0.64318216", "0.6355506", "0.6210274", "0.6148939", "0.6109755", "0.60450846", "0.60199803", "0.5994511", "0.5994511", "0.59711343", "0.5918675", "0.591639", "0.59116274", "0.5877087", "0.5877087", "0.5775038", "0.57616264", "0.5738314", "0.571399...
0.60793984
7
Ensure that kafka actions are not overwritten in the IAM definition
def test_kafka_action_names_overlap_issue(self): # Kafka actions used to be in two pages but are now one. This verifies the current state. # results = get_actions_for_service("kafka") # print(results) actions = [ "kafka:BatchAssociateScramSecret", "kafka:BatchDisassociateScramSecret", "kafka:CreateClusterV2", "kafka:DeleteConfiguration", "kafka:DescribeClusterV2", "kafka:ListClustersV2", "kafka:ListConfigurationRevisions", "kafka:ListKafkaVersions", "kafka:ListScramSecrets", "kafka:RebootBroker", "kafka:UpdateBrokerType", "kafka:UpdateConfiguration", "kafka:UpdateConnectivity", "kafka:UpdateSecurity" ] for action in actions: self.assertTrue(action in self.all_actions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def legal_actions(self):\n raise NotImplementedError", "def get_actions(self, request):\n actions = super().get_actions(request)\n if not settings.PUBLISHER_CODE:\n del actions['create_cwr']\n if 'delete_selected' in actions:\n del actions['delete_selected']\n ...
[ "0.57733667", "0.5674129", "0.5623118", "0.55654687", "0.54859173", "0.5416407", "0.53971905", "0.5386766", "0.5313959", "0.5262459", "0.52369434", "0.5227309", "0.5218352", "0.5207143", "0.51959413", "0.51855755", "0.5174657", "0.51633334", "0.51326454", "0.51265115", "0.506...
0.67827463
0
1. Maintain a decreasing stack by scanning nums from left to right. 2. Then scan the nums from right to left and calculate the maxWidth between each ramp.
def maxWidthRamp(self, nums: list[int]) -> int: maxWidth = 0 descStack = [] # Generate decreasing stack. for i, num in enumerate(nums): if not descStack or nums[descStack[-1]] > num: descStack.append(i) # Check elements from right to left. for j in reversed(range(len(nums))): while descStack and nums[descStack[-1]] <= nums[j]: maxWidth = max(maxWidth, j - descStack.pop()) return maxWidth
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def peg_width_per_levels(base_width):\n limiter = 2\n decrementer = -2\n decrementing_width = int(base_width)\n peg_count_per_level = []\n while decrementing_width >= limiter:\n peg_count_per_level.append(int(decrementing_width))\n decrementing_width += decrementer\n return peg_coun...
[ "0.6181188", "0.61154115", "0.61118263", "0.58728755", "0.5702233", "0.5701698", "0.5680999", "0.55414397", "0.553379", "0.5523938", "0.55195713", "0.5513929", "0.5513566", "0.550096", "0.54771763", "0.5460938", "0.5459851", "0.5444232", "0.5441669", "0.54264915", "0.54041374...
0.8285866
0
A little function to make graphing less of a pain. Creates a plot with titles and axis labels. Adds a new line to a blank figure and labels it.
def plothusly(ax, x, y, *, xtitle='', ytitle='', datalabel='', title='', linestyle = '-', marker = ''): ax.set_xlabel(xtitle) ax.set_ylabel(ytitle) ax.set_title(title) out = ax.plot(x, y, zorder=1, label=datalabel, linestyle = linestyle, marker = marker) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def newGraph(self, xlab, ylab):\r\n if (not self.doMPL):\r\n newGraph = Gnuplot(debug=0)\r\n\t #newGraph('set data style linespoints')\r\n\t newGraph.set_label('xlabel', xlab)\r\n\t newGraph.set_label('ylabel', ylab)\r\n return newGraph\r\n else:\r\n self.mplFigCou...
[ "0.73383814", "0.726306", "0.71619123", "0.7137723", "0.70683104", "0.70184547", "0.69500715", "0.6938889", "0.6920055", "0.69195294", "0.6891798", "0.67042345", "0.6702502", "0.66961604", "0.6673671", "0.66625386", "0.66425776", "0.66200614", "0.66146296", "0.6595933", "0.65...
0.6106391
57
A little function to make graphing less of a pain Adds a new line to a blank figure and labels it
def plothus(ax, x, y, *, datalabel='', linestyle = '-', marker = ''): out = ax.plot(x, y, zorder=1, label=datalabel, linestyle = linestyle, marker = marker) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_graph(self, x, y, label):\n pyplot.plot(x[:len(y)], y[:len(x)], label=label)", "def line_graph():\r\n #create the data in an array\r\n xval = np.arange(0,6,(np.pi*(1./10)))\r\n yval = np.cos(xval)\r\n data = np.array([xval,yval])\r\n data = data.transpose()\r\n y = np.arange(-1,1...
[ "0.6918902", "0.6715607", "0.66300875", "0.6610081", "0.6605", "0.65108675", "0.6509052", "0.6483655", "0.64417446", "0.6436465", "0.64165217", "0.64094067", "0.6303501", "0.6279875", "0.62422055", "0.6156551", "0.61285985", "0.6121611", "0.6119316", "0.61107874", "0.61096317...
0.5919027
36
Given an input (instance of the BenchInput tuple), constructs and validates a disjunctive ChaumPedersen proof, returning the time (in seconds) to do each operation.
def chaum_pedersen_bench(bi: BenchInput) -> Tuple[float, float]: (keypair, r, s) = bi ciphertext = get_optional(elgamal_encrypt(0, r, keypair.public_key)) start1 = timer() proof = make_disjunctive_chaum_pedersen_zero( ciphertext, r, keypair.public_key, ONE_MOD_Q, s ) end1 = timer() valid = proof.is_valid(ciphertext, keypair.public_key, ONE_MOD_Q) end2 = timer() if not valid: raise Exception("Wasn't expecting an invalid proof during a benchmark!") return end1 - start1, end2 - end1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def part2(input):\n ps = PlanetSystem(input)\n c = ps.total_cycle_time()\n return c", "def part_2():\n input_ = parse_input() + list(range(10, 1_000_001))\n cups = turn_input_into_cups(input_)\n cups = solve(cups, first_cup=cups[input_[0]], turns=10_000_000)\n\n return cups[1].next.number * ...
[ "0.57208085", "0.54804933", "0.5131725", "0.50099516", "0.49980843", "0.49875277", "0.4948482", "0.49447292", "0.49406472", "0.4887707", "0.4879715", "0.48475006", "0.48428223", "0.4833638", "0.48149478", "0.48010787", "0.4800391", "0.4798947", "0.47890905", "0.47886792", "0....
0.60869694
0
Placeholder function used just to warm up the parallel mapper prior to benchmarking.
def identity(x: int) -> int: return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def warmup():\n return ''", "def warmup():\n return ''", "def warmup():\n return ''", "def parallelizer(func, arg=False):\n if arg:\n func(arg)\n else:\n func()", "def pfmap(func, workers=8):\n return fmap(func)", "def mapper(fun: Callable[[str], Pin], /) -> None:", ...
[ "0.58914787", "0.58914787", "0.58914787", "0.58523786", "0.58267385", "0.5818383", "0.5795961", "0.5795961", "0.5604408", "0.5542775", "0.5542228", "0.54952943", "0.5452233", "0.5393733", "0.5384684", "0.5382945", "0.5378095", "0.5368914", "0.53356403", "0.52627987", "0.52194...
0.0
-1
Average of a list of numbers
def average(l: List[float]) -> float: n = len(l) if n == 0: return 0 return sum(l) / n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def number_list_average(numbers):\n return sum(numbers)/len(numbers)", "def avg(list):\n return sum(list) / len(list)", "def avg(l):\n return (sum(l)/float(len(l)))", "def mean(num_list):\n i = 0\n num_sum = 0.0\n for item in num_list:\n num_sum += item\n i...
[ "0.88870585", "0.848456", "0.8434734", "0.8398049", "0.83776134", "0.8336529", "0.8316348", "0.83155566", "0.82814676", "0.8269709", "0.8267124", "0.8266494", "0.8249835", "0.82441944", "0.8243413", "0.8158804", "0.815238", "0.81390256", "0.80972695", "0.80799866", "0.8048972...
0.8089239
19
Standard deviation of a list of numbers
def std(l: List[float]) -> float: n = len(l) if n == 0: return 0 avg = average(l) return sqrt(sum([(avg - i) * (avg - i) for i in l]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def standard_deviation(xs: List[float]) -> float:\n return math.sqrt(variance(xs))", "def standard_deviation(xs: List[float]) -> float:\n return math.sqrt(variance(xs))", "def standard_deviation(list):\n num_items = len(list)\n mean = sum(list) / num_items\n differences = [x - mean for x in list...
[ "0.85239553", "0.85239553", "0.82795185", "0.8250837", "0.811858", "0.8115698", "0.81116855", "0.80819494", "0.80761546", "0.8022738", "0.79791003", "0.7964581", "0.7951416", "0.7936604", "0.79159623", "0.7855638", "0.7839232", "0.7821115", "0.7801466", "0.7798936", "0.779461...
0.7768153
21
Test of function choosing if log rotation is needed
def test_need_to_rotate_log(self): self.assertTrue(need_to_rotate_log(0, 20, 'daily', 15, 'daily'), 'rotate log by time') self.assertFalse(need_to_rotate_log(10, 20, 'daily', 15, 'hourly'), 'do not rotate log by time') self.assertTrue(need_to_rotate_log(10, 20, 'daily', 25, None), 'rotate log by max size') self.assertFalse(need_to_rotate_log(10, 20, 'hourly', 5, 'hourly'), 'do not rotate log by min size')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_log_rotation(self):\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"first\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"second\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"third\\\"}\"))\n filename = self.conveyer.rotate_logs...
[ "0.62220013", "0.61344224", "0.58783966", "0.58642936", "0.5808232", "0.5750104", "0.5715419", "0.5713662", "0.5678804", "0.56212765", "0.55951023", "0.5573171", "0.5567551", "0.5551365", "0.55413747", "0.5514498", "0.5495516", "0.5494864", "0.54912615", "0.5475333", "0.54750...
0.7284713
0
Test of conversion human like file size units to integer
def test_human_size_units_to_base(self): self.assertEqual(human_size_units_to_base(1), 1) self.assertEqual(human_size_units_to_base('1'), 1) self.assertEqual(human_size_units_to_base('1b'), 1) self.assertEqual(human_size_units_to_base('1k'), 1000) self.assertEqual(human_size_units_to_base('1kb'), 1000) self.assertEqual(human_size_units_to_base('1kib'), 1024) self.assertEqual(human_size_units_to_base('1KiB'), 1024) self.assertEqual(human_size_units_to_base('1KiB 1b'), 1025) self.assertEqual(human_size_units_to_base('1M'), 1000000) self.assertEqual(human_size_units_to_base('1Mi'), 1024*1024) self.assertEqual(human_size_units_to_base('1G'), 1000000000) self.assertEqual(human_size_units_to_base('1T'), 1000000000000) self.assertRaisesRegex(ValueError, 'Bad unit "a" in size parameter "1a".', human_size_units_to_base, '1a')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _file_scale(fn):\n s = utils.file_word(fn)\n try:\n n = int(s, 0)\n except ValueError:\n n = float(s)\n return n", "def test_human_size(self):\n\n human_size = pyCompressor.human_size\n\n self.assertEqual(human_size(2), \"2.0 bytes\")\n self.assertEqual(human_si...
[ "0.7208111", "0.70847607", "0.70484537", "0.7023813", "0.692497", "0.6893805", "0.6853747", "0.6853747", "0.68404585", "0.6745593", "0.66911465", "0.66774565", "0.6614509", "0.6607619", "0.659698", "0.65692264", "0.6559572", "0.6522625", "0.6506409", "0.65063375", "0.647743",...
0.6409324
27
Tests of try rotation without configuration
def test_process_log_without_configuration(self): with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), {}, 'hourly', '/tmp/pokus.log', 10 ) self.assertEqual(compressors, []) self.assertEqual(fake_stdout.getvalue(), 'Checking "/tmp/pokus.log"... rotation not needed.\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rotated(self):\n self._calibration_test(\"rotated\")", "def test_skel_rotation_fail(self):\n cmds.file(f=1, new=1)\n cmds.mayaUSDImport(file=self.skel_file, ani=1)\n\n values = cmds.keyframe('joint1.rx', q=1, vc=1)\n self.assertNotAlmostEqual(0.0, values[-1])", "def...
[ "0.7053024", "0.6514648", "0.6510062", "0.64673233", "0.6354083", "0.6268674", "0.62312365", "0.6228066", "0.6227956", "0.6155046", "0.6153381", "0.61482006", "0.6102702", "0.6070507", "0.60431606", "0.6037593", "0.60291284", "0.60207057", "0.59471196", "0.593792", "0.5898857...
0.0
-1
Tests of try rotation without configuration
def test_process_log_without_target_configuration(self): with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), {'max_size': 0}, 'hourly', '/tmp/pokus.log', 10 ) self.assertEqual(compressors, []) self.assertEqual(fake_stdout.getvalue(), 'Checking "/tmp/pokus.log"... missing target in configuration.\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rotated(self):\n self._calibration_test(\"rotated\")", "def test_skel_rotation_fail(self):\n cmds.file(f=1, new=1)\n cmds.mayaUSDImport(file=self.skel_file, ani=1)\n\n values = cmds.keyframe('joint1.rx', q=1, vc=1)\n self.assertNotAlmostEqual(0.0, values[-1])", "def...
[ "0.7053024", "0.6514648", "0.6510062", "0.64673233", "0.6354083", "0.6268674", "0.62312365", "0.6228066", "0.6227956", "0.6155046", "0.6153381", "0.61482006", "0.6102702", "0.6070507", "0.60431606", "0.6037593", "0.60291284", "0.60207057", "0.59471196", "0.593792", "0.5898857...
0.0
-1
Tests of try rotation with ignore in configuration
def test_process_log_with_ignore_in_configuration(self): with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), {'ignore': True}, 'hourly', '/tmp/pokus.log', 10 ) self.assertEqual(compressors, []) self.assertEqual(fake_stdout.getvalue(), '')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rotated(self):\n self._calibration_test(\"rotated\")", "def test_need_to_rotate_log(self):\n self.assertTrue(need_to_rotate_log(0, 20, 'daily', 15, 'daily'), 'rotate log by time')\n self.assertFalse(need_to_rotate_log(10, 20, 'daily', 15, 'hourly'), 'do not rotate log by time')\n ...
[ "0.6419533", "0.63577354", "0.59528947", "0.5869784", "0.58633524", "0.5819878", "0.57872254", "0.5771617", "0.5742969", "0.57247704", "0.57068", "0.5701424", "0.5659168", "0.555079", "0.5517062", "0.5510512", "0.54531544", "0.5388795", "0.53871274", "0.5342193", "0.53315157"...
0.0
-1
Tests of try rotation with min_size in configuration
def test_process_log_with_min_size_in_configuration(self): with tempfile.TemporaryDirectory() as sandbox: with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: srcfile = Path(sandbox, 'pokus.log') srcfile.touch() destfile = Path(sandbox, 'backup', 'pokus.log') compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), {'target': '{{path}}/backup/{{name}}.{{ext}}', 'interval': 'hourly', 'min_size': 15}, 'hourly', str(srcfile), 10 ) self.assertEqual(compressors, []) self.assertTrue(srcfile.exists()) self.assertFalse(destfile.exists()) self.assertEqual(fake_stdout.getvalue(), 'Checking "{src}"... rotation not needed.\n'.format(src=srcfile))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _optimise_rotation(self):\n logger.info(\n f\"Minimising dimer rotation up to \"\n f'δϕ = {self.phi_tol.to(\"degrees\"):.4f}º'\n )\n\n for i in range(self._ratio_rot_iters):\n\n result = self._rotate()\n\n if (\n result == _StepRes...
[ "0.5948658", "0.5849822", "0.5711593", "0.5711048", "0.5682156", "0.565054", "0.563231", "0.56153196", "0.5595372", "0.55438685", "0.55225056", "0.54687154", "0.5414168", "0.5409983", "0.5369747", "0.5347305", "0.5310389", "0.52922046", "0.5289117", "0.52687037", "0.52551305"...
0.54828143
11
Tests of try rotation with target and interval in configuration
def test_process_log_with_target_in_configuration(self): with tempfile.TemporaryDirectory() as sandbox: with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: srcfile = Path(sandbox, 'pokus.log') srcfile.touch() destfile = Path(sandbox, 'backup', 'pokus-20190110-2130.log') compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), {'target': '{{path}}/backup/{{name}}-%Y%m%d-%H%M.{{ext}}', 'interval': 'hourly'}, 'hourly', str(srcfile), 10 ) self.assertEqual(compressors, []) self.assertFalse(srcfile.exists()) self.assertTrue(destfile.exists()) self.assertEqual(fake_stdout.getvalue(), 'Checking "{src}"... rotating... "{src}" -> "{dest}" done.\n'.format(src=srcfile, dest=destfile))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_arbitrary_rotation(self):\n \n # This test is run a bunch of times on various intervals, ranging from 50% to 1/6\n\t\t# (16.667%).\n for i in range(2, 7):\n \n interval = 1 / i # The amount to increase each qubit's probability by, relative to the previous qubi...
[ "0.6687461", "0.6452325", "0.6416947", "0.62044525", "0.6084357", "0.5957329", "0.5694046", "0.56862086", "0.56001717", "0.556808", "0.5566651", "0.5540372", "0.5497633", "0.54841346", "0.54720324", "0.54684687", "0.5468463", "0.5463228", "0.53882957", "0.53821856", "0.535683...
0.0
-1
Tests of try rotation with relative target and interval in configuration
def test_process_log_with_relative_target_in_configuration(self): with tempfile.TemporaryDirectory() as sandbox: with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: srcfile = Path(sandbox, 'pokus.log') srcfile.touch() destfile = Path(sandbox, 'backup', 'pokus.log') compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), {'target': 'backup/{{name}}.{{ext}}', 'interval': 'hourly'}, 'hourly', str(srcfile), 10 ) self.assertEqual(compressors, []) self.assertFalse(srcfile.exists()) self.assertTrue(destfile.exists()) self.assertEqual(fake_stdout.getvalue(), 'Checking "{src}"... rotating... "{src}" -> "{dest}" done.\n'.format(src=srcfile, dest=Path('backup', 'pokus.log')))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_arbitrary_rotation(self):\n \n # This test is run a bunch of times on various intervals, ranging from 50% to 1/6\n\t\t# (16.667%).\n for i in range(2, 7):\n \n interval = 1 / i # The amount to increase each qubit's probability by, relative to the previous qubi...
[ "0.6849133", "0.66332906", "0.64928585", "0.63230497", "0.621054", "0.6208657", "0.6164278", "0.59979725", "0.5994086", "0.5906198", "0.5899471", "0.5757551", "0.5706605", "0.5610273", "0.56079453", "0.560449", "0.56034946", "0.5596018", "0.55945164", "0.55886596", "0.5548625...
0.0
-1
Tests of try rotation with compress in configuration
def test_process_log_with_compress_in_configuration(self): with tempfile.TemporaryDirectory() as sandbox: with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: srcfile = Path(sandbox, 'pokus.log') srcfile.touch() destfile = Path(sandbox, 'backup', 'pokus.log') compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), { 'target': '{{path}}/backup/{{name}}.{{ext}}', 'interval': 'hourly', 'compress': 'gzip -9' }, 'hourly', str(srcfile), 10 ) self.assertEqual(compressors, [[sandbox, 'gzip', '-9', str(destfile)]]) self.assertFalse(srcfile.exists()) self.assertTrue(destfile.exists()) self.assertEqual(fake_stdout.getvalue(), 'Checking "{src}"... rotating... "{src}" -> "{dest}" done.\n'.format(src=srcfile, dest=destfile))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_compress_works(self):\n tau = 45.0\n mrate = 60.0\n Mrate = 100.0\n gain = 5\n\n tmax = 50.0\n dt = 0.2\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.gain = gain\n\n self.motor.error_fct ...
[ "0.63947666", "0.61248237", "0.6114381", "0.6083236", "0.6082104", "0.6062771", "0.59740895", "0.59541243", "0.5930159", "0.5895802", "0.57862955", "0.5755226", "0.5731432", "0.56462026", "0.56308395", "0.5578965", "0.5572358", "0.55638254", "0.5525263", "0.5516631", "0.55155...
0.62119424
1
Tests of try rotation with exec_pre in configuration
def test_process_log_with_exec_pre_in_configuration(self): with tempfile.TemporaryDirectory() as sandbox: with mock.patch('sys.stderr', new=io.StringIO()) as fake_stderr: with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: stream_handler = logging.StreamHandler(fake_stderr) logging.getLogger().addHandler(stream_handler) try: srcfile = Path(sandbox, 'pokus.log') srcfile.touch() destfile = Path(sandbox, 'backup', 'pokus.log') compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), { 'target': '{{path}}/backup/{{name}}.{{ext}}', 'interval': 'hourly', 'compress': 'bzip2', 'exec_pre': '/bin/false' }, 'hourly', str(srcfile), 10 ) finally: logging.getLogger().removeHandler(stream_handler) self.assertEqual(compressors, []) self.assertTrue(srcfile.exists()) self.assertFalse(destfile.exists()) self.assertEqual(fake_stdout.getvalue(), 'Checking "{src}"... exec_pre failed.\n'.format(src=srcfile)) self.assertEqual(fake_stderr.getvalue(), 'exec_pre "/bin/false pokus.log" failed with code 1\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_relaunch_deployment_run(self):\n pass", "def test_retest_deployment_run(self):\n pass", "def test_retry_run(self):\n pass", "def test_pre_hooks(self):\n os.makedirs('/tmp/localhost/pacha_pre')\n touch_script = open('/tmp/localhost/pacha_pre/foo.sh', 'w')\n t...
[ "0.62804943", "0.62302554", "0.5996128", "0.59816766", "0.58999544", "0.57469875", "0.5720267", "0.5683897", "0.5645453", "0.55972165", "0.5577958", "0.55499387", "0.54937345", "0.54752177", "0.5470032", "0.53967464", "0.5395424", "0.53703004", "0.5367641", "0.53619397", "0.5...
0.6106966
2
Tests of try rotation with exec_post in configuration
def test_process_log_with_exec_post_in_configuration(self): with tempfile.TemporaryDirectory() as sandbox: with mock.patch('sys.stderr', new=io.StringIO()) as fake_stderr: with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: stream_handler = logging.StreamHandler(fake_stderr) logging.getLogger().addHandler(stream_handler) try: srcfile = Path(sandbox, 'pokus.log') srcfile.touch() destfile = Path(sandbox, 'backup', 'pokus.log') compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), { 'target': '{{path}}/backup/{{name}}.{{ext}}', 'interval': 'hourly', 'compress': 'bzip2', 'exec_post': '/bin/false' }, 'hourly', str(srcfile), 10 ) finally: logging.getLogger().removeHandler(stream_handler) self.assertEqual(compressors, []) self.assertFalse(srcfile.exists()) self.assertTrue(destfile.exists()) self.assertEqual(fake_stdout.getvalue(), 'Checking "{src}"... rotating... "{src}" -> "{dest}" exec_post failed.\n'.format(src=srcfile, dest=destfile)) self.assertEqual(fake_stderr.getvalue(), 'exec_post "/bin/false {dest}" failed with code 1\n'.format(dest=destfile))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_relaunch_deployment_run(self):\n pass", "def test_retest_deployment_run(self):\n pass", "def test_retry_run(self):\n pass", "def test_redeploy(self):\n pass", "def test_encrypt_creates_and_restores_backup(\n self,\n mock_os,\n mock_shutil,\n ...
[ "0.642584", "0.63849103", "0.63836694", "0.5834038", "0.5756419", "0.56373674", "0.56172615", "0.5577163", "0.55671585", "0.5550985", "0.5545134", "0.5540187", "0.55376", "0.5514732", "0.5497733", "0.5491249", "0.54792863", "0.54780215", "0.5450571", "0.543589", "0.5422628", ...
0.61695516
3
Tests of try rotation with positive pre and post exec in configuration
def test_process_log_with_pre_and_post_in_configuration(self): with tempfile.TemporaryDirectory() as sandbox: with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: srcfile = Path(sandbox, 'pokus.log') srcfile.touch() destfile = Path(sandbox, 'backup', 'pokus.log') compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), { 'target': '{{path}}/backup/{{name}}.{{ext}}', 'interval': 'hourly', 'compress': 'gzip -9', 'exec_pre': '/bin/true', 'exec_post': '/bin/true' }, 'hourly', str(srcfile), 10 ) self.assertEqual(compressors, [[sandbox, 'gzip', '-9', str(destfile)]]) self.assertFalse(srcfile.exists()) self.assertTrue(destfile.exists()) self.assertEqual(fake_stdout.getvalue(), 'Checking "{src}"... rotating... "{src}" -> "{dest}" done.\n'.format(src=srcfile, dest=destfile))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_need_to_rotate_log(self):\n self.assertTrue(need_to_rotate_log(0, 20, 'daily', 15, 'daily'), 'rotate log by time')\n self.assertFalse(need_to_rotate_log(10, 20, 'daily', 15, 'hourly'), 'do not rotate log by time')\n self.assertTrue(need_to_rotate_log(10, 20, 'daily', 25, None), 'rotat...
[ "0.60960656", "0.6015227", "0.6008511", "0.5992133", "0.55971855", "0.553978", "0.5513998", "0.55035573", "0.54847586", "0.5416907", "0.5409339", "0.5401802", "0.5393093", "0.5367785", "0.53527", "0.53498495", "0.5327782", "0.53196454", "0.5276392", "0.5270697", "0.52541757",...
0.5355824
14
Tests of try rotation with target exists
def test_process_log_with_target_exists(self): with tempfile.TemporaryDirectory() as sandbox: with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: srcfile = Path(sandbox, 'pokus.log') srcfile.touch() destfile = Path(sandbox, 'backup', 'pokus.log') destfile.mkdir(parents=True) compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), { 'target': '{{path}}/backup/{{name}}.{{ext}}', 'interval': 'hourly', 'compress': 'gzip -9', }, 'hourly', str(srcfile), 10 ) self.assertEqual(compressors, []) self.assertTrue(srcfile.exists()) self.assertTrue(destfile.exists()) self.assertEqual(fake_stdout.getvalue(), 'Checking "{src}"... rotating... "{src}" -> "{dest}" target already exists!\n'.format(src=srcfile, dest=destfile))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_skel_rotation_fail(self):\n cmds.file(f=1, new=1)\n cmds.mayaUSDImport(file=self.skel_file, ani=1)\n\n values = cmds.keyframe('joint1.rx', q=1, vc=1)\n self.assertNotAlmostEqual(0.0, values[-1])", "def check_rotation_fault(self, current_pos, target_pos):\n \n f...
[ "0.63469565", "0.633046", "0.5920605", "0.58786756", "0.58433646", "0.58242995", "0.57923526", "0.5692897", "0.5678962", "0.5639551", "0.55446136", "0.5513295", "0.5480883", "0.5480366", "0.5454172", "0.5421362", "0.5417596", "0.54171485", "0.53870094", "0.53733045", "0.53608...
0.5124146
41
Tests of try rotation with OS error while file move
def test_process_log_with_os_error_at_move(self): with tempfile.TemporaryDirectory() as sandbox: with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: with self.assertLogs() as logger: srcfile = Path(sandbox, 'pokus.log') srcfile.touch() destpath = Path(sandbox, 'backup') destpath.touch() compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), { 'target': '{{path}}/backup/{{name}}.{{ext}}', 'interval': 'hourly', 'compress': 'gzip -9', }, 'hourly', str(srcfile), 10 ) self.assertEqual(compressors, []) self.assertTrue(srcfile.exists()) self.assertEqual(fake_stdout.getvalue(), 'Checking "{src}"... rotating... '.format(src=srcfile)) self.assertIn("FileExistsError: [Errno 17] File exists: '{}'".format(destpath), logger.output[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rotatePermissionFileNotOk(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n os.chmod(log.path, 0o444)\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)", "def test_move_badtgzfile(self):...
[ "0.684734", "0.6769702", "0.67119235", "0.6446284", "0.6391524", "0.6390081", "0.6323381", "0.6310103", "0.62937844", "0.6224313", "0.6208974", "0.6208974", "0.61933196", "0.619122", "0.6079188", "0.6045162", "0.598325", "0.59720737", "0.5965603", "0.5938034", "0.58546984", ...
0.6581009
3
Test get_spec_config on empty conf
def test_get_spec_config_empty(self): spec_conf = get_spec_config({}, '') self.assertEqual(spec_conf, {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_spec_config_defaults(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'foo': 'bar'\n }\n }, '')\n self.assertEqual(spec_conf, {'foo': 'bar'})", "def test_config_spec(self):\n spec = self._gen.config_spec()\n self.assertIn('Numbe...
[ "0.76028246", "0.7311133", "0.72477543", "0.7058012", "0.69974715", "0.69667923", "0.68360406", "0.6766845", "0.67532086", "0.6709966", "0.6709966", "0.66910833", "0.66852343", "0.6642581", "0.6638022", "0.6632264", "0.66242176", "0.661909", "0.661713", "0.6613062", "0.660850...
0.84538144
0
Test get_spec_config on conf with defaults
def test_get_spec_config_defaults(self): spec_conf = get_spec_config({ 'defaults': { 'foo': 'bar' } }, '') self.assertEqual(spec_conf, {'foo': 'bar'})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_spec_config_match(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'default_foo': 'default_bar',\n 'foo': 'bar'\n },\n 'specific': [\n {'mask': ['filenomatch'], 'foo': 'bar_nomatch'},\n {'mask':...
[ "0.77164143", "0.7436098", "0.7076511", "0.7013907", "0.6881537", "0.6805723", "0.678276", "0.6729274", "0.6729274", "0.66931385", "0.66794413", "0.6565056", "0.6563163", "0.65287656", "0.6503387", "0.64884305", "0.64868605", "0.6484734", "0.6454509", "0.6409027", "0.63939387...
0.8287769
0
Test get_spec_config on matching conf
def test_get_spec_config_match(self): spec_conf = get_spec_config({ 'defaults': { 'default_foo': 'default_bar', 'foo': 'bar' }, 'specific': [ {'mask': ['filenomatch'], 'foo': 'bar_nomatch'}, {'mask': ['filematch'], 'foo': 'match'}, {'mask': ['filenomatch2'], 'foo': 'bar_nomatch2'} ] }, 'filematch') self.assertEqual(spec_conf, {'default_foo': 'default_bar', 'foo': 'match', 'mask': ['filematch']})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_config_spec(self):\n spec = self._gen.config_spec()\n self.assertIn('Number of examples', spec)\n self.assertIn('Maximum number of columns to change', spec)\n self.assertIn('Regression threshold', spec)\n self.assertIn('Prediction key', spec)", "def test_get_spec_config_defaults(self):\n ...
[ "0.7582333", "0.67213416", "0.6621223", "0.66077214", "0.6536261", "0.65335846", "0.64342177", "0.6432759", "0.6411313", "0.6303535", "0.62789136", "0.6260329", "0.62516683", "0.6249136", "0.6229848", "0.6223742", "0.6223742", "0.62210363", "0.61939514", "0.617981", "0.615186...
0.8165602
0
Check that given modifier name is valid one. If not raise exception based on violation.
def _isValidModifier(self, modifiers, modifierName): if Modifiers.ILLEGAL_MODIFIER_PATTER.search(modifierName): msg = ('Modifier named "{0}" in sheet {1} contains illegal characters. ' 'Supported characters are a to z, A to Z, 0 to 9 and underscore "_". ' 'Spaces are not allowed characters, use underscore instead. For example ' '"some_mod".' ).format(modifierName, MODIFIER_LIST_SHEET_NAME) raise errors.UnsupportedCharacter(MODIFIER_LIST_SHEET_NAME, msg) if modifierName in map(lambda mod: mod.name, modifiers): msg = ('Modifier named "{0}" already exists in the sheet {1}. ' 'Modifier names must be unique. To fix remove or rename ' 'duplicates.' ).format(modifierName, MODIFIER_LIST_SHEET_NAME) raise errors.DuplicateError(MODIFIER_LIST_SHEET_NAME, msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateName(name):\r\n if not name:\r\n raise IllegalName('Name can not be an empty string.')\r\n\r\n m = _NAME_RE.match(name)\r\n\r\n if m is None or m.group(0) != name:\r\n raise IllegalName('Name has to start with a letter followed by an '\r\n 'arbitrary numb...
[ "0.64032656", "0.63721097", "0.6336271", "0.62860745", "0.62444806", "0.6232126", "0.6111689", "0.6085224", "0.60028106", "0.59533495", "0.5930594", "0.59233236", "0.5870704", "0.58153677", "0.5802187", "0.5795572", "0.57821715", "0.5776781", "0.5740856", "0.5735548", "0.5728...
0.78125316
0
Supported functions for the user in boolean equations.
def _evalContext(self): def xor(*args): return sum(args) == 1 def neg(result): return not result context = { 'xor': xor, 'neg': neg } return context
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def boolean_func(experiment):", "def true(func):\n return MultipleChoice(_text_from_func(func), Answer('True'), Answer('False'), is_code=True)", "def __bool__(self):\n raise RuntimeError(\"Cannot evaluate CrypTensors to boolean values\")", "def main():\n var_num = int(input(\"Please, enter the n...
[ "0.6549295", "0.6121206", "0.59864044", "0.59644145", "0.5946533", "0.5924393", "0.5879584", "0.58733624", "0.58189994", "0.58100283", "0.58097804", "0.57936394", "0.5781556", "0.5762648", "0.57621485", "0.57520574", "0.5727584", "0.57086086", "0.56952477", "0.56867784", "0.5...
0.0
-1
The process_song_data function extracts song data in JSON file format from an AWS S3 bucket(input_data), transforms data into specified tables and then loads the tables back into a designated AWS S3 bucket(output_data) in parquet file format.
def process_song_data(spark, input_data, output_data): # get filepath to song data file song_data = os.path.join(input_data, 'song_data/*/*/*/*.json') #specify schema for increased performance and control song_schema = StructType([ StructField("artist_id", StringType()), StructField("artist_latitude", DoubleType()), StructField("artist_location", StringType()), StructField("artist_longitude", StringType()), StructField("artist_name", StringType()), StructField("duration", DoubleType()), StructField("num_songs", IntegerType()), StructField("song_id", StringType()), StructField("title", StringType()), StructField("year", IntegerType()), ]) # read song data file dfs = spark.read.json(song_data, schema=song_schema) # create temporary view of table in order to run SQL queries dfs.createOrReplaceTempView("song_table") # extract columns to create songs table dim_songs = spark.sql(""" SELECT song_id, title, artist_id, year, duration FROM song_table WHERE song_id IS NOT NULL """) # write songs table to parquet files partitioned by year and artist dim_songs.write.mode('overwrite').partitionBy("year", "artist_id").parquet(output_data+"songs") # extract columns to create artists table dim_artists = spark.sql(""" SELECT DISTINCT artist_id, artist_name AS name, artist_location AS location, artist_latitude AS latitude, artist_longitude AS longitude FROM song_table WHERE artist_id IS NOT NULL """) # write artists table to parquet files dim_artists.write.mode('overwrite').parquet(output_data+"artists")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_song_data(spark, input_data, output_data):\n \n # get filepath to song data file\n song_data = input_data + \"song_data/*/*/*/*.json\"\n \n # read song data file\n df = spark.read.json(song_data).dropDuplicates()\n\n # extract columns to create songs table\n global songs_table\n...
[ "0.82500315", "0.82351744", "0.82207996", "0.82134485", "0.82060736", "0.8200832", "0.81907237", "0.81895614", "0.81846154", "0.8184577", "0.81579006", "0.8133783", "0.81304675", "0.8083736", "0.80220705", "0.8002966", "0.79769325", "0.7895738", "0.7668537", "0.6478855", "0.6...
0.804698
14
The process_log_data function extracts log data in JSON file format from an AWS S3 bucket(input_data), transforms data into specified tables and then loads the tables back into a designated AWS S3 bucket(output_data) in parquet file format.
def process_log_data(spark, input_data, output_data): # get filepath to log data file log_data = os.path.join(input_data, 'log_data/*/*/*.json') # read log data file dfl = spark.read.json(log_data) # filter by actions for song plays dfl = dfl.filter(dfl.page == "NextSong") #create temporary view in order to run SQL queries dfl.createOrReplaceTempView("log_table") # extract columns for users table dim_users = spark.sql(""" SELECT DISTINCT userId AS user_id, firstName AS first_name, lastName AS last_name, gender, level FROM log_table WHERE userId IS NOT NULL """) # write users table to parquet files dim_users.write.mode('overwrite').parquet(output_data+"users") # create timestamp column from original timestamp column #get_timestamp = udf() #df = # create datetime column from original timestamp column #get_datetime = udf() #df = #Convert ts field to timestamp time_convert = spark.sql(""" SELECT to_timestamp(ts/1000) as start_times FROM log_table WHERE ts IS NOT NULL """) #create temporary view of time_table to run SQL queries time_convert.createOrReplaceTempView("time_table") # extract columns to create time table dim_time = spark.sql(""" SELECT start_times as start_time, hour(start_times) as hour, dayofmonth(start_times) as day, weekofyear(start_times) as week, month(start_times) as month, year(start_times) as year, dayofweek(start_times) as weekday FROM time_table """) # write time table to parquet files partitioned by year and month dim_time.write.mode('overwrite').partitionBy("year", "month").parquet(output_data+"time") # read in song data to use for songplays table song_df = spark.read.parquet(output_data+'songs') # extract columns from joined song and log datasets to create songplays table fact_songplays = spark.sql(""" SELECT monotonically_increasing_id() as songplay_id, to_timestamp(lt.ts/1000) as start_time, month(to_timestamp(lt.ts/1000)) as month, year(to_timestamp(lt.ts/1000)) as year, lt.userId as user_id, lt.level as level, st.song_id as song_id, 
st.artist_id as artist_id, lt.sessionId as session_id, lt.location as location, lt.userAgent as user_agent FROM log_table lt JOIN song_table st ON lt.song = st.title AND lt.artist = st.artist_name """) # write songplays table to parquet files partitioned by year and month fact_songplays.write.mode('overwrite').partitionBy("year", "month").parquet(output_data+"songplays")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_log_data(spark, input_data, output_data):\n\n # get filepath to log data file\n log_data = input_data + \"log_data/2018/11/*.json\"\n\n # read log data file\n log_data_schema = StructType([\n StructField(\"artist\", StringType(), True),\n StructField(\"auth\", StringType(), Fa...
[ "0.7315748", "0.73054594", "0.6975627", "0.69641143", "0.6958745", "0.6947524", "0.6941278", "0.69293684", "0.6911164", "0.6907852", "0.686215", "0.6784478", "0.6759412", "0.6692344", "0.66841125", "0.6366269", "0.62756515", "0.6237044", "0.62061685", "0.61751795", "0.5997216...
0.66948694
13
View function that handles inserting new comments via POST data (form submission)
def post_comment_form(request): try: comment, previous_version = get_comment(request) except InvalidCommentException as e: raise parent_comment = comment.parent tree_root = parent_comment.get_root() parent_object = tree_root.content_object if not user_can_post_comment(request, comment): raise Exception("User can't create comments") if is_past_max_depth(comment): raise Exception("Max depth reached") # If the comment object (NOT the message) hasn't been saved yet... if comment._state.adding == True: comment = add_comment(comment) # Everything has checked out, so we save the new version and return the appropriate response version_form, new_version = create_new_version(request, comment) return comment
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comments_new():\n comment = {\n \"title\": request.form.get(\"title\"),\n \"content\": request.form.get(\"content\"),\n \"playlist_id\": ObjectId(request.form.get(\"playlist._id\")),\n }\n print(comment)\n comment_id = db.comments.insert_one(comment).inserted_id\n return red...
[ "0.7209647", "0.7182928", "0.70809394", "0.70615923", "0.7031212", "0.70248646", "0.70202553", "0.6970048", "0.6925778", "0.69036263", "0.68516064", "0.68059367", "0.6770113", "0.6715247", "0.6710386", "0.67016625", "0.6607484", "0.66067475", "0.6575949", "0.65087175", "0.649...
0.70013684
7
View function that handles inserting new/editing previously existing comments via Ajax
def post_comment(request, send_signal=True): # Based on variables passed in we get the comment the user is attempting to create/edit try: comment, previous_version = get_comment(request) except InvalidCommentException as e: transaction.rollback() return JsonResponse({ 'ok': False, 'error_message': str(e), }) # Check if the user doesn't pass the appropriate permission check (on the parent_object)... # We call this on the parent comment because the comment itself may not have been saved yet (can't call .get_root on it) # TODO: Fix this for root comment? (no parent) parent_comment = comment.parent tree_root = parent_comment.get_root() parent_object = tree_root.content_object if not user_can_post_comment(request, comment): transaction.set_rollback(True) return JsonResponse({ 'ok': False, 'error_message': "You do not have permission to post this comment.", }) # Check to make sure we are not trying to save a comment "deeper" than we are allowed... if is_past_max_depth(comment): transaction.set_rollback(True) return JsonResponse({ 'ok': False, 'error_message': "You cannot respond to this comment.", }) # If the comment object (NOT the message) hasn't been saved yet... if comment._state.adding == True: comment = add_comment(comment) # Now that we have a comment object, we get a 'lock' on it to prevent a race condition try: lock_comment(comment) except DatabaseError: transaction.set_rollback(True) # Someone is already trying to update this comment, so we need to return an appropriate error return JsonResponse({ 'ok': False, 'error_message': "Someone else is currently editing this comment. Please refresh your page and try again.", }) # Now we know we have sole access to the comment object at the moment so we need to check if we are editing the most recent version if not_most_recent_version(comment, previous_version): transaction.set_rollback(True) return JsonResponse({ 'ok': False, 'error_message': "You are not editing the most recent version of this comment. 
Please refresh your page and try again.", }) # Everything has checked out, so we save the new version and return the appropriate response version_form, new_version = create_new_version(request, comment) if version_form.is_valid(): comment_template, kwargs = get_template(request, comment, parent_object, tree_root, new_version, previous_version, send_signal=send_signal) return JsonResponse({ 'ok': True, 'html_content': loader.render_to_string(comment_template, context=kwargs) }) else: transaction.set_rollback(True) return JsonResponse({ 'ok': False, 'error_message': "There were errors in your submission. Please correct them and resubmit.", })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_comments():\n potential_deal_id = int(request.form.get(\"id\"))\n action = request.form.get(\"action\")\n if action.lower() == \"none\":\n action = None\n comments = request.form.get(\"comments\")\n db_handler = DBHandler()\n db_handler.update_by_id(potential_deal_id, action, comm...
[ "0.67972344", "0.6647041", "0.663194", "0.66208017", "0.65695584", "0.6565645", "0.6551287", "0.65155035", "0.6482362", "0.6476776", "0.6422695", "0.6392417", "0.6366266", "0.6361407", "0.63019305", "0.6271926", "0.6269781", "0.6195616", "0.61950845", "0.61534417", "0.6148189...
0.6193762
19
View function that returns the comment tree for the desired parent object.
def load_comments(request): # TODO: Add the ability to return comment tree in JSON format. # First we get the root of the comment tree being requested try: tree_root, parent_object = _get_or_create_tree_root(request) except InvalidCommentException as e: return JsonResponse({ 'ok': False, 'error_message': str(e), }) # Check if the user doesn't pass the appropriate permission check (on the parent_object)... if not user_has_permission(request, parent_object, 'can_view_comments'): return JsonResponse({ 'ok': False, 'error_message': "You do not have permission to view comments for this object.", }) # Once we have our desired nodes, we tack on all of the select/prefetch related stuff nodes = tree_root.get_family().select_related('deleted_user_info', 'created_by', 'parent', 'content_type')\ .prefetch_related(Prefetch('versions', queryset=CommentVersion.objects.order_by('-date_posted')\ .select_related('posting_user', 'deleted_user_info'))) # The 'X_KWARGS' header is populated by settings.kwarg in comments.js kwargs = json.loads(request.META.get('HTTP_X_KWARGS', {})) kwargs.update({ 'nodes': nodes, 'parent_object': parent_object, 'max_depth': tree_root.max_depth }) comments_template = get_attr_val(request, parent_object, 'comments_template', 'comments/comments.html', **kwargs) # In the parent_object, sites can define a function called 'filter_nodes' if they wish to apply any additional filtering to the nodes queryset before it's rendered to the template. # Default value is the nodes tree with the deleted comments filtered out. 
nodes = get_attr_val(request, parent_object, "filter_nodes", default=nodes.filter(deleted=False), **kwargs) kwargs.update({"nodes": nodes, 'request': request}) # Checks/assigns permissions to each node (so the template doesn't have to) _process_node_permissions(**kwargs) return JsonResponse({ 'ok': True, 'html_content': loader.render_to_string(comments_template, context=kwargs, request=request), 'number_of_comments': tree_root.get_descendant_count() })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_by_parent(parent_id):\n return CommentsTreeDAO(root_id=parent_id)", "def parent(self) -> Comment | praw.models.Submission:\n # pylint: disable=no-member\n if self.parent_id == self.submission.fullname:\n return self.submission\n\n if self.parent_id in self.submis...
[ "0.69411963", "0.67797315", "0.6721547", "0.66233695", "0.65036964", "0.64508355", "0.63603806", "0.63551664", "0.6318134", "0.6311694", "0.6311694", "0.6311694", "0.6236535", "0.6184287", "0.6184287", "0.61701477", "0.6166827", "0.6166827", "0.6166827", "0.6166827", "0.61653...
0.5738746
76
Interpret and store the decoded json dictionary from the handshake header file. Start an empty data tracking dictionary, according to data_labels.
def __init__(self, sock, header_dict): super().__init__(header_dict) self.sock = sock
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_header(\n self, header, filename, run_check_acceptability=True, background_lsts=True\n ):\n # get telescope information\n latitude = header[\"latitude\"][()]\n longitude = header[\"longitude\"][()]\n altitude = header[\"altitude\"][()]\n self.telescope_locatio...
[ "0.5554169", "0.5521527", "0.53812873", "0.5335025", "0.5277362", "0.5266372", "0.5250536", "0.52459896", "0.5201471", "0.5174532", "0.5169621", "0.5158002", "0.5134929", "0.5060775", "0.5059112", "0.5053946", "0.50465447", "0.50403374", "0.50372404", "0.50230026", "0.5013707...
0.0
-1
Called by the GUI clock at regular intervals according to self.sampling_rate. Send a data request signal to the Arduino and interpret the json output. Return the entire history of all recorded data (regardless of whether or not a null value was read or if the data series are not 'checked' in the GUI).
def sample(self): print("sampling bluetooth arduino") self.sock.send(b'B') data = b'' '''while True: data += self.sock.recv(1024) if data.endswith(b'\n'): break ''' #self.sock.settimeout(2) try: while True: d = self.sock.recv(255) data += d if d.find(b'\n') != -1: break except Exception as err: print(err) pass print(data) data = json.loads(data.decode()) if not any(x == 0 for x in data.values()): for label in self.data_labels: self.data[label].append(data[label]) #self.data["time"].append(time.time() - self.start) print(data) #print(self.data) #return self.data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recive_data(self):\n # read all available data\n while self.ser.inWaiting() > self.INPUT_DATA_SIZE+1:\n data = array.array('c')\n # search the header\n data.append(self.ser.read(1))\n while data[0] != chr(1):\n data[0] = self.ser.read(1)\...
[ "0.6700092", "0.6360538", "0.6166242", "0.60464233", "0.6038091", "0.6023963", "0.60063523", "0.5908595", "0.58683914", "0.5861097", "0.58507204", "0.5833523", "0.5832018", "0.57664955", "0.57027274", "0.5638211", "0.5628629", "0.5615999", "0.56110036", "0.56060934", "0.55733...
0.6273694
2
Determines if a given datetime.datetime is aware.
def is_aware(value): return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_aware(value: datetime) -> bool:\n\n return value.utcoffset() is not None", "def test_make_datetime_aware(settings):\n # Set the TIME_ZONE in the settings.\n settings.TIME_ZONE = \"America/New_York\"\n\n # Calling make_datetime_aware() returns a timezone-aware datetime referring\n # to the m...
[ "0.7423152", "0.6706246", "0.656491", "0.65267324", "0.6137775", "0.60896856", "0.6019291", "0.59568536", "0.57755363", "0.57708514", "0.5679119", "0.5660917", "0.56022364", "0.55941117", "0.5527686", "0.55162454", "0.54947275", "0.5432787", "0.5420078", "0.5379313", "0.53406...
0.6891435
1
For date and time values shows how many seconds, minutes or hours ago compared to current timestamp returns representing string.
def naturaltime(value): if not isinstance(value, date): # datetime is a subclass of date return value now = datetime.now(utc if is_aware(value) else None) if value < now: delta = now - value if delta.days != 0: return 'hace %(delta)s' % {'delta': defaultfilters.timesince(value)} elif delta.seconds == 0: return 'ahora' elif delta.seconds < 60: return u'hace %(count)s segundos' % {'count': delta.seconds} elif delta.seconds // 60 < 60: count = delta.seconds // 60 return u'hace %(count)s minutos' % {'count': count} else: count = delta.seconds // 60 // 60 return u'hace %(count)s horas' % {'count': count}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def humanize_ts(timestamp=False):\n now = datetime.now()\n diff = now - datetime.fromtimestamp(timestamp)\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if se...
[ "0.7112443", "0.70603174", "0.6994772", "0.6923336", "0.69147646", "0.689994", "0.68880904", "0.68262726", "0.6814668", "0.6745085", "0.67083603", "0.6695963", "0.66525495", "0.6583381", "0.6518557", "0.65105355", "0.6455629", "0.63705647", "0.62662446", "0.6263275", "0.62592...
0.64102644
17