query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Returns the sum of all priorities stored in this sum tree.
def _total_priority(self): return self.nodes[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sum(self) -> int:\n return self.root.sum", "def sum(self):\n return sum(self.items())", "def get_sum(self):\n return self.__tree[0]", "def sum(self):\n return sum(self._values.values())", "def total_priority(self) -> int:\n return self.tree[0].item()", "def sum(self...
[ "0.6984377", "0.67695606", "0.67609626", "0.6698405", "0.6667603", "0.6662966", "0.66467816", "0.66216046", "0.66216046", "0.65775466", "0.6440943", "0.6387141", "0.635604", "0.6329519", "0.63249815", "0.6249568", "0.61827457", "0.6161957", "0.61138844", "0.6100439", "0.60939...
0.65628356
10
Samples an element from the sum tree.
def sample(self, rng, query_value=None): nodes = jnp.array(self.nodes) query_value = ( jax.random.uniform(rng) if query_value is None else query_value) query_value *= self._total_priority() _, index, _ = jax.lax.fori_loop(0, self.depth, step, (query_value, 0, nodes)) return np.minimum(index - self.low_idx, self.highest_set)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample(self):\n return self._root.sample()", "def sample(self) -> None:\n with self._samples_lock:\n if self.element_sampler.has_element:\n self._samples.append(self.element_sampler.el)\n self.element_sampler.has_element = False", "def sample(self, x):", "def sample(self):\n ...
[ "0.7230191", "0.65261966", "0.63430864", "0.60525787", "0.60525787", "0.60074574", "0.5991044", "0.5952631", "0.59495324", "0.5881766", "0.5785147", "0.57582784", "0.57345945", "0.57001364", "0.5682698", "0.5620183", "0.56149185", "0.56036645", "0.55956614", "0.55863214", "0....
0.58518076
10
Performs stratified sampling using the sum tree.
def stratified_sample(self, batch_size, rng): if self._total_priority() == 0.0: raise Exception('Cannot sample from an empty sum tree.') indices = parallel_stratified_sample(rng, self.nodes, np.arange(batch_size), batch_size, self.depth) return np.minimum(indices - self.low_idx, self.highest_set)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"black\", \"label\": lab}\n # print tree.nodes()\n\n if only_tree is True:\n...
[ "0.5792323", "0.57242346", "0.5687577", "0.5676991", "0.56017643", "0.55336124", "0.54666936", "0.5430783", "0.53604496", "0.5358043", "0.530985", "0.5304589", "0.5277224", "0.5271483", "0.52695686", "0.5267166", "0.5261225", "0.52445275", "0.52442276", "0.5240669", "0.520557...
0.76193213
0
Returns the value of the leaf node corresponding to the index.
def get(self, node_index): return self.nodes[node_index + self.low_idx]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, index: int) -> int:\n node = self.get_node(index)\n if node:\n return node.val\n else:\n return -1", "def get_leaf(self, leaf_index):\n return self.__leaves_db.get(encode_int(leaf_index))", "def _value_at(self, index):\n node = self._get_no...
[ "0.7675132", "0.75705653", "0.748514", "0.748514", "0.7377657", "0.7373278", "0.7160219", "0.7113328", "0.70526975", "0.7030114", "0.67928636", "0.66183364", "0.64346117", "0.6430315", "0.64223456", "0.641513", "0.63261515", "0.6309149", "0.62792677", "0.62526083", "0.6249397...
0.6483467
12
Sets the value of a leaf node and updates internal nodes accordingly. This operation takes O(log(capacity)).
def set(self, node_index, value): if value < 0.0: raise ValueError( 'Sum tree values should be nonnegative. Got {}'.format(value)) self.highest_set = max(node_index, self.highest_set) node_index = node_index + self.low_idx self.max_recorded_priority = max(value, self.max_recorded_priority) delta_value = value - self.nodes[node_index] # Now traverse back the tree, adjusting all sums along the way. for _ in reversed(range(self.depth)): # Note: Adding a delta leads to some tolerable numerical inaccuracies. self.nodes[node_index] += delta_value node_index = (node_index - 1) // 2 self.nodes[node_index] += delta_value assert node_index == 0, ('Sum tree traversal failed, final node index ' 'is not 0.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_leaf_node(self, leaf_value):\n\n if not self.empty:\n try:\n node_key = self.node_key\n except AttributeError:\n node_key = '_'\n raise ValueError(\n 'Cannot modify a non-empty node. ' + \\\n 'If you meant to change type of node {}, '.format...
[ "0.7562629", "0.7099471", "0.6929827", "0.6921371", "0.6896148", "0.6799146", "0.67869157", "0.67869157", "0.67869157", "0.6710561", "0.6672968", "0.66426194", "0.6601433", "0.6589014", "0.6578079", "0.6524441", "0.6495659", "0.6437595", "0.64201003", "0.64104587", "0.6396286...
0.63829994
21
Logs message for given level
def log_message(msg, lvl='info'): extra = { 'remote_addr': request.remote_addr, 'url': request.url_rule } loggers = { 'warning': current_app.logger.warning, 'info': current_app.logger.info, 'debug': current_app.logger.debug, 'error': current_app.logger.error, 'critical': current_app.logger.critical } loggers[lvl](msg, extra=extra)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __log(level, message):\n if level == 1:\n logging.info(\" \" + str(datetime.datetime.now()) + \" \" + message)\n if level == 2:\n logging.error(\" \" + str(datetime.datetime.now()) + \" \" + message)\n if level == 3:\n logging.critical(\" \" + str(datetime.datetime.now()) + \" \" ...
[ "0.81246626", "0.78997695", "0.78037274", "0.780287", "0.7799478", "0.77843386", "0.77657396", "0.77248126", "0.77185434", "0.7705718", "0.76910615", "0.7610388", "0.75899214", "0.7555818", "0.75395006", "0.74972963", "0.74645865", "0.7456792", "0.74560916", "0.7455943", "0.7...
0.68746495
44
Prints a message only when app is in debug mode
def print_debug(message): if current_app.debug: print(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug():\n assert current_app.debug == False, \"Don't panic! You're here by request of debug()\"", "def checkDebug(message):\n if debug == True:\n print(message)", "def debug(msg):\n if not DEBUG_ON:\n return\n print(\"DEBUG:\" + str(msg))", "def print_debug(msg):\n if IS_DEB...
[ "0.8137028", "0.80153483", "0.77376115", "0.7687844", "0.76748055", "0.767379", "0.7656623", "0.7569436", "0.7528649", "0.7510831", "0.7510831", "0.74950415", "0.7483869", "0.74653614", "0.74565285", "0.74526405", "0.7379881", "0.736538", "0.734727", "0.73451245", "0.734026",...
0.8150175
0
Call this function after determining game should end
def endGame(self, message): print(self.board) print("Game over! " + message) self.gameOver = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def endGame(self):\n pass", "def quit_game(self):\n self.done = True", "def end_game(self):\n self.game.stop_running()", "def api_end_game(self):\n pass", "def end(self, won, reason):\n pass\n # replace with your end logic", "def endGame(self):\n #self.act...
[ "0.87138814", "0.78778964", "0.7744418", "0.774094", "0.7740156", "0.7713625", "0.76796615", "0.76591814", "0.7582613", "0.75363356", "0.7433579", "0.74228734", "0.7402117", "0.73863685", "0.7358601", "0.7350067", "0.73360884", "0.7276998", "0.72497123", "0.7248907", "0.71962...
0.69771904
45
Win by reaching the other side
def checkReachWin(self, color, row): if color[0] == 'W' and row == 0: self.endGame("White reached the end. White wins!\n") elif color[0] == 'B' and row == self.board.size - 1: self.endGame("Black reached the end. Black wins!\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def does_move_win(self, x, y):\n me = self.board[x][y]\n for (dx, dy) in [(0, +1), (+1, +1), (+1, 0), (+1, -1)]:\n p = 1\n while self.is_on_board(x+p*dx, y+p*dy) and self.board[x+p*dx][y+p*dy] == me:\n p += 1\n n = 1\n while self.is_on_board(...
[ "0.653839", "0.65257937", "0.64947987", "0.6482304", "0.6401277", "0.6380247", "0.62850153", "0.6280651", "0.62673837", "0.6259922", "0.6240374", "0.62018436", "0.62003434", "0.61975145", "0.61953324", "0.6182207", "0.61644655", "0.61555684", "0.61346805", "0.61338586", "0.61...
0.0
-1
Lose by not having any possible valid moves to make
def checkNoMoveLoss(self, moves): if not moves: winner = "Black" if self.turn[0] == "W" else "White" self.endGame(self.turn + " has nowhere to move. " + winner + " wins!\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def any_legal_move(self, player, board):\r\n moves = self.legal_moves(player, board)\r\n #print(moves)\r\n return len(moves)!=0", "def get_valid_moves(self):\r\n # castling and en-passant rights are stored, because move affects these values\r\n temp_enpassant_possible = self.en...
[ "0.7330048", "0.7140586", "0.7039396", "0.697742", "0.69300544", "0.6770414", "0.676601", "0.6749859", "0.66704226", "0.66598386", "0.66426575", "0.66328067", "0.6632505", "0.66169274", "0.6604243", "0.65838623", "0.65772635", "0.6562603", "0.6525635", "0.6522035", "0.6512511...
0.63703394
44
Checking string input for format [Row][ColLetter] or [ColLetter][Row] (and not case sensitive)
def parsePosition(self, parse): if len(parse) == 2: ch1 = ord(parse[0].lower()) ch2 = ord(parse[1].lower()) maxNum = 48 + self.board.size # ascii of max row # # [Row#][ColLetter]] case if 48 < ch1 <= maxNum and 97 <= ch2 < (97 + self.board.size): return maxNum - ch1, ch2 - 97 # actual grid indexes of desired position # [ColLetter][Row#] case if 48 < ch2 <= maxNum and 97 <= ch1 < (97 + self.board.size): return maxNum - ch2, ch1 - 97 # actual grid indexes of desired position return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_valid_row_or_col(val: str):\n try:\n val = int(val)\n if 1 <= val <= 10:\n return True\n return False\n except (ValueError, TypeError):\n return False", "def must_contain_letter(cell):\n # Check if it's nan first\n if check_empty(cell):\n return Tr...
[ "0.62249833", "0.6188426", "0.6152934", "0.59214056", "0.58721745", "0.5853586", "0.5825021", "0.5805107", "0.5749606", "0.57305473", "0.5700164", "0.5682207", "0.565165", "0.5625806", "0.55352753", "0.55241305", "0.5514427", "0.54875064", "0.5479798", "0.5474993", "0.5458025...
0.5664392
12
Returns a string of the possible moves a pawn can make
def suggestMoves(self, startLoc, moves, hasCapture): suggest = "" for move in moves[startLoc]: if hasCapture: if moves[startLoc][move] == "capture": suggest += chr(move[1] + 97) + str(self.board.size - move[0]) + " " else: suggest += chr(move[1] + 97) + str(self.board.size - move[0]) + " " return suggest
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def legal_moves():\n\tlegal_moves = (\"r\", \"p\", \"s\")\n\treturn legal_moves", "def legal_moves(self):\n moves = \"\"\n swappable = self.swappable_positions\n empty_position = self.get_position(0)\n\n for s in swappable:\n pos_diff = empty_position[0] - s[0], empty_posit...
[ "0.75860536", "0.7416093", "0.7053937", "0.69375557", "0.68239874", "0.67419034", "0.6697224", "0.6633449", "0.6593096", "0.6580948", "0.6536958", "0.65312415", "0.6490962", "0.64726305", "0.64582986", "0.6440909", "0.6390071", "0.6388789", "0.6385916", "0.6381437", "0.637102...
0.5896593
60
This function will optionally print a header guard for `cl_khr_fp64` if a 64bit type is used as the source or destination and return a bool that indicates whether this guard will need closed after the calling function has finished printing functions that use the 64bit source/destination type.
def conditional_guard(src, dst): int64_count = 0 float64_count = 0 float16_count = 0 if src in int64_types or dst in int64_types: int64_count = 1 if src in float64_types or dst in float64_types: float64_count = 1 if src in float16_types or dst in float16_types: float16_count = 1 if float16_count > 0: print("#ifdef cl_khr_fp16") if float64_count > 0: #In embedded profile, if cl_khr_fp64 is supported cles_khr_int64 has to be print("#ifdef cl_khr_fp64") return 1 + float16_count elif int64_count > 0: print("#if defined cles_khr_int64 || !defined(__EMBEDDED_PROFILE__)") return 1 + float16_count return float16_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_64_windows():\n return struct.calcsize('P') * 8 == 64", "def have_binary128():\n try:\n ti = type_info(np.longdouble)\n except FloatingError:\n return False\n return (ti['nmant'], ti['maxexp']) == (112, 16384)", "def is_H(self):\n return True", "def is_H(self):\n ...
[ "0.4971328", "0.46334147", "0.46194386", "0.46194386", "0.4604871", "0.4570718", "0.45214003", "0.45135522", "0.44693208", "0.44124466", "0.44066575", "0.44048572", "0.43934348", "0.43365443", "0.42555895", "0.4250501", "0.42504188", "0.42486706", "0.42251563", "0.4212047", "...
0.59550357
0
This function will close conditional guard opened by conditional_guard.
def close_conditional_guard(close_conditional): for _ in range(close_conditional): print("#endif")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __exit__(self, type, value, traceback) :\n if self.spec :\n self.handle.close()\n self.handle = None", "def __exit__(self, type, value, traceback):\n self._conn.close()\n if self._mode == 'w' and self.__special_exit != 'lock exists':\n if os.path.exists('%s_l...
[ "0.6132105", "0.6084127", "0.6026904", "0.59685796", "0.59367067", "0.5921348", "0.5896142", "0.5895424", "0.5871011", "0.5860161", "0.5860161", "0.5860161", "0.5860161", "0.58590287", "0.58513856", "0.58342", "0.58342", "0.58342", "0.58342", "0.58342", "0.582248", "0.58066...
0.6058337
2
This helper function returns the correct clc core conversion function name for a given source and destination type, with optional size, mode and saturation arguments.
def clc_core_fn_name(dst, size='', mode='', sat=''): return "__clc_convert_{DST}{N}{SAT}{MODE}".format(DST=dst, N=size, SAT=sat, MODE=mode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def src_get_name(converter_type):\n return ffi.string(_lib.src_get_name(converter_type)).decode()", "def cython_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n ...
[ "0.58428967", "0.5695806", "0.56170404", "0.5537308", "0.54060704", "0.5312154", "0.52636075", "0.5209674", "0.5179819", "0.5158412", "0.5142017", "0.51086265", "0.50943804", "0.5006753", "0.5005477", "0.5003055", "0.49839967", "0.4969238", "0.49600613", "0.49500346", "0.4889...
0.80150145
0
Initilize Style MelGAN generator.
def __init__( self, in_channels=128, aux_channels=80, channels=64, out_channels=1, kernel_size=9, dilation=2, bias=True, noise_upsample_scales=[11, 2, 2, 2], noise_upsample_activation="LeakyReLU", noise_upsample_activation_params={"negative_slope": 0.2}, upsample_scales=[2, 2, 2, 2, 2, 2, 2, 2, 1], upsample_mode="nearest", gated_function="softmax", use_weight_norm=True, ): super().__init__() self.in_channels = in_channels noise_upsample = [] in_chs = in_channels for noise_upsample_scale in noise_upsample_scales: # NOTE(kan-bayashi): How should we design noise upsampling part? noise_upsample += [ torch.nn.ConvTranspose1d( in_chs, channels, noise_upsample_scale * 2, stride=noise_upsample_scale, padding=noise_upsample_scale // 2 + noise_upsample_scale % 2, output_padding=noise_upsample_scale % 2, bias=bias, ) ] noise_upsample += [ getattr(torch.nn, noise_upsample_activation)( **noise_upsample_activation_params ) ] in_chs = channels self.noise_upsample = torch.nn.Sequential(*noise_upsample) self.noise_upsample_factor = np.prod(noise_upsample_scales) self.blocks = torch.nn.ModuleList() aux_chs = aux_channels for upsample_scale in upsample_scales: self.blocks += [ TADEResBlock( in_channels=channels, aux_channels=aux_chs, kernel_size=kernel_size, dilation=dilation, bias=bias, upsample_factor=upsample_scale, upsample_mode=upsample_mode, gated_function=gated_function, ), ] aux_chs = channels self.upsample_factor = np.prod(upsample_scales) self.output_conv = torch.nn.Sequential( torch.nn.Conv1d( channels, out_channels, kernel_size, 1, bias=bias, padding=(kernel_size - 1) // 2, ), torch.nn.Tanh(), ) # apply weight norm if use_weight_norm: self.apply_weight_norm() # reset parameters self.reset_parameters()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n s...
[ "0.70487845", "0.644251", "0.63743216", "0.6038303", "0.5973697", "0.59719676", "0.5944061", "0.5878216", "0.5834873", "0.58195704", "0.5817429", "0.5784765", "0.5783701", "0.577011", "0.57584363", "0.57117826", "0.5703821", "0.5685936", "0.5671047", "0.5652533", "0.5648202",...
0.0
-1
Remove weight normalization module from all of the layers.
def remove_weight_norm(self): def _remove_weight_norm(m): try: logging.debug(f"Weight norm is removed from {m}.") torch.nn.utils.remove_weight_norm(m) except ValueError: # this module didn't have weight norm return self.apply(_remove_weight_norm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remo...
[ "0.79998016", "0.76628864", "0.7629923", "0.72848314", "0.7045166", "0.67848754", "0.6780856", "0.6746634", "0.6591088", "0.65823525", "0.6566755", "0.65617", "0.65572643", "0.65561306", "0.6538293", "0.6538293", "0.6538293", "0.6529808", "0.6526071", "0.651576", "0.64801204"...
0.7893102
2
Apply weight normalization module from all of the layers.
def apply_weight_norm(self): def _apply_weight_norm(m): if isinstance(m, torch.nn.Conv1d) or isinstance( m, torch.nn.ConvTranspose1d ): torch.nn.utils.weight_norm(m) logging.debug(f"Weight norm is applied to {m}.") self.apply(_apply_weight_norm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.B...
[ "0.7143955", "0.7143955", "0.7143955", "0.71335757", "0.7069364", "0.7069364", "0.70534694", "0.7049442", "0.70303166", "0.6807604", "0.6779128", "0.6764302", "0.67596924", "0.6736109", "0.6710482", "0.66960436", "0.6584008", "0.6554928", "0.6526808", "0.65179425", "0.6516647...
0.73184675
1
Register stats for denormalization as buffer.
def register_stats(self, stats): assert stats.endswith(".h5") or stats.endswith(".npy") if stats.endswith(".h5"): mean = read_hdf5(stats, "mean").reshape(-1) scale = read_hdf5(stats, "scale").reshape(-1) else: mean = np.load(stats)[0].reshape(-1) scale = np.load(stats)[1].reshape(-1) self.register_buffer("mean", torch.from_numpy(mean).float()) self.register_buffer("scale", torch.from_numpy(scale).float()) logging.info("Successfully registered stats as buffer.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cast_buffers(self,\n dtype: Optional[torch.dtype] = None,\n memo: Optional[Set] = None) -> None:\n if memo is None:\n memo = set()\n for module in self.modules():\n if module is not self and isinstance(module, XlaFullyShardedDataParallel):\n # Allow...
[ "0.51819456", "0.5043784", "0.49926385", "0.4946909", "0.49447924", "0.49344134", "0.4884747", "0.4879836", "0.48627967", "0.48563662", "0.48147842", "0.47965068", "0.478542", "0.47853506", "0.47853506", "0.47853506", "0.47518125", "0.4747054", "0.47394067", "0.47262183", "0....
0.67643166
0
Initilize Style MelGAN discriminator.
def __init__( self, repeats=2, window_sizes=[512, 1024, 2048, 4096], pqmf_params=[ [1, None, None, None], [2, 62, 0.26700, 9.0], [4, 62, 0.14200, 9.0], [8, 62, 0.07949, 9.0], ], discriminator_params={ "out_channels": 1, "kernel_sizes": [5, 3], "channels": 16, "max_downsample_channels": 512, "bias": True, "downsample_scales": [4, 4, 4, 1], "nonlinear_activation": "LeakyReLU", "nonlinear_activation_params": {"negative_slope": 0.2}, "pad": "ReflectionPad1d", "pad_params": {}, }, use_weight_norm=True, ): super().__init__() # window size check assert len(window_sizes) == len(pqmf_params) sizes = [ws // p[0] for ws, p in zip(window_sizes, pqmf_params)] assert len(window_sizes) == sum([sizes[0] == size for size in sizes]) self.repeats = repeats self.window_sizes = window_sizes self.pqmfs = torch.nn.ModuleList() self.discriminators = torch.nn.ModuleList() for pqmf_param in pqmf_params: d_params = copy.deepcopy(discriminator_params) d_params["in_channels"] = pqmf_param[0] if pqmf_param[0] == 1: self.pqmfs += [torch.nn.Identity()] else: self.pqmfs += [PQMF(*pqmf_param)] self.discriminators += [BaseDiscriminator(**d_params)] # apply weight norm if use_weight_norm: self.apply_weight_norm() # reset parameters self.reset_parameters()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n super(Discriminator, self).__init__()\n\n # Use stride in convolutions to downsample image to size 1\n\n # Using BatchNorm2d 0.8 for stability based on reading of https://github.com/eriklindernoren/PyTorch-GAN code\n layers = [nn.Conv2d(512, 1, kernel_size=4, strid...
[ "0.67468584", "0.62949085", "0.6285982", "0.62006986", "0.6102082", "0.60385245", "0.6021358", "0.60148615", "0.60062915", "0.5981237", "0.59416723", "0.590252", "0.5898757", "0.5888632", "0.58052456", "0.5799055", "0.5795089", "0.5771385", "0.5755582", "0.5748591", "0.572184...
0.0
-1
Apply weight normalization module from all of the layers.
def apply_weight_norm(self): def _apply_weight_norm(m): if isinstance(m, torch.nn.Conv1d) or isinstance( m, torch.nn.ConvTranspose1d ): torch.nn.utils.weight_norm(m) logging.debug(f"Weight norm is applied to {m}.") self.apply(_apply_weight_norm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.B...
[ "0.71435213", "0.71435213", "0.71435213", "0.7134102", "0.70675933", "0.70675933", "0.70535696", "0.7048801", "0.70298284", "0.6807073", "0.67789346", "0.6764012", "0.6759336", "0.67359823", "0.671116", "0.6696031", "0.6583814", "0.6554803", "0.65268165", "0.65178454", "0.651...
0.7316938
2
Initilize Style MelGAN generator.
def __init__( self, in_channels=128, aux_channels=128, channels=64, out_channels=1, num_embs=100, num_spk_embs=128, spk_emb_dim=128, concat_spk_emb=False, kernel_size=9, dilation=2, bias=True, noise_upsample_scales=[11, 2, 2, 2], noise_upsample_activation="LeakyReLU", noise_upsample_activation_params={"negative_slope": 0.2}, upsample_scales=[2, 2, 2, 2, 2, 2, 2, 2, 1], upsample_mode="nearest", gated_function="softmax", use_weight_norm=True, ): super().__init__() self.in_channels = in_channels # define id embedding self.emb = torch.nn.Embedding( num_embeddings=num_embs, embedding_dim=aux_channels ) self.spk_emb = torch.nn.Embedding( num_embeddings=num_spk_embs, embedding_dim=spk_emb_dim ) self.concat_spk_emb = concat_spk_emb if not concat_spk_emb: assert aux_channels == spk_emb_dim else: aux_channels = aux_channels + spk_emb_dim noise_upsample = [] in_chs = in_channels for noise_upsample_scale in noise_upsample_scales: # NOTE(kan-bayashi): How should we design noise upsampling part? noise_upsample += [ torch.nn.ConvTranspose1d( in_chs, channels, noise_upsample_scale * 2, stride=noise_upsample_scale, padding=noise_upsample_scale // 2 + noise_upsample_scale % 2, output_padding=noise_upsample_scale % 2, bias=bias, ) ] noise_upsample += [ getattr(torch.nn, noise_upsample_activation)( **noise_upsample_activation_params ) ] in_chs = channels self.noise_upsample = torch.nn.Sequential(*noise_upsample) self.noise_upsample_factor = np.prod(noise_upsample_scales) self.blocks = torch.nn.ModuleList() aux_chs = aux_channels for upsample_scale in upsample_scales: self.blocks += [ TADEResBlock( in_channels=channels, aux_channels=aux_chs, kernel_size=kernel_size, dilation=dilation, bias=bias, upsample_factor=upsample_scale, upsample_mode=upsample_mode, gated_function=gated_function, ), ] aux_chs = channels self.upsample_factor = np.prod(upsample_scales) self.output_conv = torch.nn.Sequential( torch.nn.Conv1d( channels, out_channels, kernel_size, 1, bias=bias, padding=(kernel_size - 
1) // 2, ), torch.nn.Tanh(), ) # apply weight norm if use_weight_norm: self.apply_weight_norm() # reset parameters self.reset_parameters()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n s...
[ "0.70491105", "0.64428604", "0.637395", "0.6040094", "0.5974412", "0.5972569", "0.59458107", "0.5878243", "0.5834997", "0.58195186", "0.5818394", "0.57847136", "0.5784305", "0.57710445", "0.5760498", "0.57129806", "0.57045466", "0.5686031", "0.56733465", "0.56541693", "0.5648...
0.0
-1
Remove weight normalization module from all of the layers.
def remove_weight_norm(self): def _remove_weight_norm(m): try: logging.debug(f"Weight norm is removed from {m}.") torch.nn.utils.remove_weight_norm(m) except ValueError: # this module didn't have weight norm return self.apply(_remove_weight_norm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remo...
[ "0.79998016", "0.76628864", "0.7629923", "0.72848314", "0.7045166", "0.67848754", "0.6780856", "0.6746634", "0.6591088", "0.65823525", "0.6566755", "0.65617", "0.65572643", "0.65561306", "0.6538293", "0.6538293", "0.6538293", "0.6529808", "0.6526071", "0.651576", "0.64801204"...
0.7893102
1
Apply weight normalization module from all of the layers.
def apply_weight_norm(self): def _apply_weight_norm(m): if isinstance(m, torch.nn.Conv1d) or isinstance( m, torch.nn.ConvTranspose1d ): torch.nn.utils.weight_norm(m) logging.debug(f"Weight norm is applied to {m}.") self.apply(_apply_weight_norm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.B...
[ "0.71430475", "0.71430475", "0.71430475", "0.713435", "0.70688635", "0.70688635", "0.7052796", "0.7049518", "0.70298177", "0.68070155", "0.6778134", "0.676388", "0.6760103", "0.67364126", "0.6710347", "0.6696243", "0.65825963", "0.65547544", "0.6525561", "0.6517765", "0.65163...
0.73182416
0
Load csv data into pandas
def load_data(filename): # Load the necessary columns from the csv into pandas data = pd.read_csv(filename, sep=';') # Cleans the data data = data[["Perioden", "Regio's",\ "Kerkelijke gezindte/Geen kerkelijke gezindte (% van de bevolking)",\ "Kerkelijke gezindte/Totaal kerkelijke gezindte (% van de bevolking)",\ "Kerkelijke gezindte/Rooms-Katholiek (% van de bevolking)",\ "Kerkelijke gezindte/Protestantse Kerk in Nederland (% van de bevolking)",\ "Kerkelijke gezindte/Nederlands Hervormd (% van de bevolking)",\ "Kerkelijke gezindte/Gereformeerd (% van de bevolking)",\ "Kerkelijke gezindte/Islam (% van de bevolking)",\ "Kerkelijke gezindte/Overige gezindte (% van de bevolking)"]] # Creates new columns for renaming purposes data["Year"] = data["Perioden"] data["Region"] = data["Regio's"] data["Athiest"] = data["Kerkelijke gezindte/Geen kerkelijke gezindte (% van de bevolking)"] data["Total"] = data["Kerkelijke gezindte/Totaal kerkelijke gezindte (% van de bevolking)"] data["Roman Catholic"] = data["Kerkelijke gezindte/Rooms-Katholiek (% van de bevolking)"] data["Protestant"] = data["Kerkelijke gezindte/Protestantse Kerk in Nederland (% van de bevolking)"] data["Dutch Reformed"] = data["Kerkelijke gezindte/Nederlands Hervormd (% van de bevolking)"] data["Reformed"] = data["Kerkelijke gezindte/Gereformeerd (% van de bevolking)"] data["Islam"] = data["Kerkelijke gezindte/Islam (% van de bevolking)"] data["Other"] = data["Kerkelijke gezindte/Overige gezindte (% van de bevolking)"] # Deletes doubles data.drop(data.columns[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], axis = 1, inplace=True) data = data.set_index("Region") print(data) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def load_data(csv_path):\n df = pd.read_csv(csv_path)\n return df", "def _parse_csv(csv_file: str) -> pd.DataFrame:\n return pd.read_csv(csv_file, header=0)", "def load_from_csv(path, delimiter=',...
[ "0.7997091", "0.76976955", "0.7580888", "0.75562894", "0.7530883", "0.75125337", "0.7335565", "0.7333322", "0.73209023", "0.72114277", "0.7200636", "0.71992266", "0.7190701", "0.71325487", "0.7120215", "0.71044284", "0.7094358", "0.7092082", "0.7048781", "0.70392513", "0.7030...
0.0
-1
Start Spark, define config and path to test data
def setUp(self): self.test_data_path = 'testing/test_data/'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n self.spark, self.log, self.config = start_spark(app_name = \"test_etl_job\",\n files='configs/etl_config.json')", "def setUp(self):\n with open(SRC_PATH + \"configs/etl_config.json\", \"r\") as f:\n self.config = j...
[ "0.77397996", "0.70026386", "0.68174267", "0.68167377", "0.6324946", "0.632194", "0.63131744", "0.62284076", "0.61973965", "0.6191538", "0.615016", "0.61465317", "0.6010293", "0.5978047", "0.59729236", "0.59613144", "0.59418756", "0.5929893", "0.5851277", "0.5777042", "0.5773...
0.50840384
79
Write to file or write to db
def on_data(self, tweet): if (time.time() - self.start_time) < self.limit: self.saveFile.write(tweet) return True else: self.saveFile.close() return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, **kwargs):\n # First, attempt to update the local store\n self.update()\n # Only get here if the file doesn't already exist\n with open(self._db_file, 'w') as fp:\n json.dump(self.to_dict(\"JSON\"), fp, **kwargs)", "def writedb(path, key, value) -> int:\n ...
[ "0.6699802", "0.6676422", "0.65458137", "0.6534973", "0.6344988", "0.6320584", "0.6263552", "0.62094015", "0.62065643", "0.6195185", "0.61912936", "0.61912936", "0.6186057", "0.6150113", "0.6143777", "0.6141619", "0.6129302", "0.61252534", "0.61246955", "0.61167467", "0.61080...
0.0
-1
Authenticate, define interested topics to search, define running mode
def __init__(self, topics=None, tweet_file=None, mode='batch'): self.topics = topics # (The twitter API will only return a max of 100 count) self.GEN_MAX_TWEET = 100 # the max number of tweets to generate self.tweet_file = tweet_file self.mode = mode self.tweets = [] if topics and tweet_file: print("WARNING! you input both topics and the tweet file, only one is expected") exit(-1) if not topics and not tweet_file: print("WARNING! you input either topics or tweet file, one is expected") exit(-1) # If file argument is given, it will not connect to twitter server # It will just save tweets in self.tweets if tweet_file: with open(tweet_file, 'r') as infile: for line in infile: self.tweets.append(json.loads(line)) else: consumer_key = 'bbqKfXEU2VJNoWlYJvbdtptOE' consumer_secret = 'afPk2JuMMMD6IhP5Xijo60ni4FUK39PDzhU7ylgT9FgNZX9ngh' access_token = '434708489-DTeHfK4OYKRuIXlfoWnNgzzwpEZTPCEpSMv8C0ll' access_token_secret = 'SjWFYfX2k3q4RJKQXcP1LP9ikhRfckPKOEcrb2cpQ0A0n' # Attempt authentication try: # create OAuthHandler object self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret) # set access token and secret self.auth.set_access_token(access_token, access_token_secret) # create tweepy API object to fetch tweets self.api = tweepy.API(self.auth) except: print("Error: Authentication Failed") exit(-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_topic_retrieval_authenticated(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.free_token.key)\n response = self.client.get('/topics/', format='json')\n data = json.loads(response.content)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n ...
[ "0.6053095", "0.58813876", "0.5666131", "0.557826", "0.5459492", "0.53823984", "0.532111", "0.532111", "0.5253463", "0.5241494", "0.52394074", "0.5199479", "0.51915526", "0.5182518", "0.51710373", "0.5163896", "0.51167077", "0.5087796", "0.50840545", "0.5075657", "0.5069533",...
0.0
-1
Limit the request sent to twitter server
def limit_handled(cursor): # TODO: possibly need this function to limit request frequency while True: try: yield cursor.next() except tweepy.RateLimitError: time.sleep(60)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def streamTweets(words = [], authors = [], timeLimit=120, removeRetweets=False, **kwargs):\n if 'stream' not in globals():\n global stream\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n listener = StdOutListener(removeRetweets=removeRetwee...
[ "0.6198629", "0.6029169", "0.58477575", "0.5808274", "0.57665455", "0.57665455", "0.5764181", "0.56391084", "0.56317997", "0.5610845", "0.56010497", "0.5593688", "0.559113", "0.5582899", "0.5582899", "0.5582899", "0.5582899", "0.5582672", "0.5578223", "0.55680096", "0.5566374...
0.558243
18
Returns a humanized rstring representing time difference between now() and the input timestamp. The output rounds up to days, hours, minutes, or seconds. 4 days 5 hours returns '4 days' 0 days 4 hours 3 minutes returns '4 hours', etc...
def time_since(timestamp=None): rstr = "" if not timestamp or not isinstance(timestamp, datetime.datetime): return rstr now = timezone.now() timediff = now - timestamp days = timediff.days weeks = days//7 months = days//30 minutes = timediff.seconds % 3600 // 60 seconds = timediff.seconds % 3600 % 60 hours = minutes // 60 if days > 365: return "> a year" if months > 0: if months == 1: tstr = "month" else: tstr = "months" rstr = rstr + "%s %s" % (months, tstr) return rstr if weeks > 0: if weeks == 1: tstr = "week" else: tstr = "weeks" rstr = rstr + "%s %s" % (weeks, tstr) return rstr if days > 0: if days == 1: tstr = "day" else: tstr = "days" rstr = rstr + "%s %s" % (days, tstr) return rstr elif hours > 0: if hours == 1: tstr = "hour" else: tstr = "hours" rstr = rstr + "%s %s" % (hours, tstr) return rstr elif minutes > 0: if minutes == 1: tstr = "min" else: tstr = "mins" rstr = rstr + "%s %s" % (minutes, tstr) return rstr elif seconds > 0: if seconds == 1: tstr = "sec" else: tstr = "secs" rstr = rstr + "%s %s" % (seconds, tstr) return rstr else: return "Now"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def humanize_ts(timestamp=False):\n now = datetime.now()\n diff = now - datetime.fromtimestamp(timestamp)\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if se...
[ "0.7199235", "0.6999816", "0.6862485", "0.6823745", "0.6743255", "0.6654757", "0.66247755", "0.65492713", "0.64874226", "0.64806837", "0.6469421", "0.64674145", "0.64304537", "0.6402844", "0.6387063", "0.6360706", "0.6358006", "0.6357516", "0.63572836", "0.63446885", "0.63081...
0.7168039
1
Wigner Ville Distribution and PseudoWigner Ville Distribution.
def wvd(signal, sampling_rate=1000, n_freqbins=None, analytical_signal=True, method="WignerVille"): # Compute the analytical signal if analytical_signal: signal = scipy.signal.hilbert(signal_detrend(signal)) # Pre-processing if n_freqbins is None: n_freqbins = 256 if method in ["pseudoWignerVille", "pwvd"]: fwindows = np.zeros(n_freqbins + 1) fwindows_mpts = len(fwindows) // 2 windows_length = n_freqbins // 4 windows_length = windows_length - windows_length % 2 + 1 windows = np.hamming(windows_length) fwindows[fwindows_mpts + np.arange(-windows_length // 2, windows_length // 2)] = windows else: fwindows = np.ones(n_freqbins + 1) fwindows_mpts = len(fwindows) // 2 time = np.arange(len(signal)) * 1.0 / sampling_rate # This is discrete frequency (should we return?) if n_freqbins % 2 == 0: frequency = np.hstack((np.arange(n_freqbins / 2), np.arange(-n_freqbins / 2, 0))) else: frequency = np.hstack( (np.arange((n_freqbins - 1) / 2), np.arange(-(n_freqbins - 1) / 2, 0)) ) tfr = np.zeros((n_freqbins, time.shape[0]), dtype=complex) # the time-frequency matrix tausec = round(n_freqbins / 2.0) winlength = tausec - 1 # taulens: len of tau for each step taulens = np.min( np.c_[ np.arange(signal.shape[0]), signal.shape[0] - np.arange(signal.shape[0]) - 1, winlength * np.ones(time.shape), ], axis=1, ) conj_signal = np.conj(signal) # iterate and compute the wv for each indices for idx in range(time.shape[0]): tau = np.arange(-taulens[idx], taulens[idx] + 1).astype(int) # this step is required to use the efficient DFT indices = np.remainder(n_freqbins + tau, n_freqbins).astype(int) tfr[indices, idx] = ( fwindows[fwindows_mpts + tau] * signal[idx + tau] * conj_signal[idx - tau] ) if (idx < signal.shape[0] - tausec) and (idx >= tausec + 1): tfr[tausec, idx] = ( fwindows[fwindows_mpts + tausec] * signal[idx + tausec] * np.conj(signal[idx - tausec]) + fwindows[fwindows_mpts - tausec] * signal[idx - tausec] * conj_signal[idx + tausec] ) tfr[tausec, idx] *= 0.5 # Now tfr contains the 
product of the signal segments and its conjugate. # To find wd we need to apply fft one more time. tfr = np.fft.fft(tfr, axis=0) tfr = np.real(tfr) # continuous time frequency frequency = 0.5 * np.arange(n_freqbins, dtype=float) / n_freqbins * sampling_rate return frequency, time, tfr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wpi(nev,mu):\n return np.sqrt(nev*eV2J**2/(mp*mu*epsilon));", "def test_wignerVille(self):\n datafile = os.path.join(os.path.dirname(__file__), 'data', 'wv.npz')\n rec = np.load(datafile)\n wv = abs(wigner_ville_spectrum(\n signal_bursts(), 10, 3.5, smoothing_filter='gauss'...
[ "0.61289203", "0.5875487", "0.58468914", "0.5686039", "0.56515056", "0.56493896", "0.5648032", "0.56267035", "0.5614728", "0.5562713", "0.5514241", "0.5454711", "0.5417939", "0.54137903", "0.53736", "0.53622884", "0.53448796", "0.5342925", "0.5333436", "0.53204405", "0.528870...
0.0
-1
Smoothed Pseudo Wigner Ville Distribution
def smooth_pseudo_wvd( signal, sampling_rate=1000, freq_length=None, time_length=None, segment_step=1, nfreqbin=None, window_method="hamming", ): # Define parameters N = len(signal) # sample_spacing = 1 / sampling_rate if nfreqbin is None: nfreqbin = 300 # Zero-padded signal to length 2N signal_padded = np.append(signal, np.zeros_like(signal)) # DFT signal_fft = np.fft.fft(signal_padded) signal_fft[1 : N - 1] = signal_fft[1 : N - 1] * 2 signal_fft[N:] = 0 # Inverse FFT signal_ifft = np.fft.ifft(signal_fft) signal_ifft[N:] = 0 # Make analytic signal signal = scipy.signal.hilbert(signal_detrend(signal_ifft)) # Create smoothing windows in time and frequency if freq_length is None: freq_length = np.floor(N / 4.0) # Plus one if window length is not odd if freq_length % 2 == 0: freq_length += 1 elif len(freq_length) % 2 == 0: raise ValueError("The length of frequency smoothing window must be odd.") if time_length is None: time_length = np.floor(N / 10.0) # Plus one if window length is not odd if time_length % 2 == 0: time_length += 1 elif len(time_length) % 2 == 0: raise ValueError("The length of time smoothing window must be odd.") if window_method == "hamming": freq_window = scipy.signal.hamming(int(freq_length)) # normalize by max time_window = scipy.signal.hamming(int(time_length)) # normalize by max elif window_method == "gaussian": std_freq = freq_length / (6 * np.sqrt(2 * np.log(2))) freq_window = scipy.signal.gaussian(freq_length, std_freq) freq_window /= max(freq_window) std_time = time_length / (6 * np.sqrt(2 * np.log(2))) time_window = scipy.signal.gaussian(time_length, std_time) time_window /= max(time_window) # to add warning if method is not one of the supported methods # Mid-point index of windows midpt_freq = (len(freq_window) - 1) // 2 midpt_time = (len(time_window) - 1) // 2 # Create arrays time_array = np.arange(start=0, stop=N, step=segment_step, dtype=int) / sampling_rate # frequency_array = np.fft.fftfreq(nfreqbin, sample_spacing)[0:nfreqbin / 2] 
frequency_array = 0.5 * np.arange(nfreqbin, dtype=float) / N pwvd = np.zeros((nfreqbin, len(time_array)), dtype=complex) # Calculate pwvd for i, t in enumerate(time_array): # time shift tau_max = np.min( [t + midpt_time - 1, N - t + midpt_time, np.round(N / 2.0) - 1, midpt_freq] ) # time-lag list tau = np.arange( start=-np.min([midpt_time, N - t]), stop=np.min([midpt_time, t - 1]) + 1, dtype="int" ) time_pts = (midpt_time + tau).astype(int) g2 = time_window[time_pts] g2 = g2 / np.sum(g2) signal_pts = (t - tau - 1).astype(int) # zero frequency pwvd[0, i] = np.sum(g2 * signal[signal_pts] * np.conjugate(signal[signal_pts])) # other frequencies for m in range(int(tau_max)): tau = np.arange( start=-np.min([midpt_time, N - t - m]), stop=np.min([midpt_time, t - m - 1]) + 1, dtype="int", ) time_pts = (midpt_time + tau).astype(int) g2 = time_window[time_pts] g2 = g2 / np.sum(g2) signal_pt1 = (t + m - tau - 1).astype(int) signal_pt2 = (t - m - tau - 1).astype(int) # compute positive half rmm = np.sum(g2 * signal[signal_pt1] * np.conjugate(signal[signal_pt2])) pwvd[m + 1, i] = freq_window[midpt_freq + m + 1] * rmm # compute negative half rmm = np.sum(g2 * signal[signal_pt2] * np.conjugate(signal[signal_pt1])) pwvd[nfreqbin - m - 1, i] = freq_window[midpt_freq - m + 1] * rmm m = np.round(N / 2.0) if t <= N - m and t >= m + 1 and m <= midpt_freq: tau = np.arange( start=-np.min([midpt_time, N - t - m]), stop=np.min([midpt_time, t - 1 - m]) + 1, dtype="int", ) time_pts = (midpt_time + tau + 1).astype(int) g2 = time_window[time_pts] g2 = g2 / np.sum(g2) signal_pt1 = (t + m - tau).astype(int) signal_pt2 = (t - m - tau).astype(int) x = np.sum(g2 * signal[signal_pt1] * np.conjugate(signal[signal_pt2])) x *= freq_window[midpt_freq + m + 1] y = np.sum(g2 * signal[signal_pt2] * np.conjugate(signal[signal_pt1])) y *= freq_window[midpt_freq - m + 1] pwvd[m, i] = 0.5 * (x + y) pwvd = np.real(np.fft.fft(pwvd, axis=0)) # Visualization return frequency_array, time_array, pwvd
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _smooth(values, std):\n width = std * 4\n x = np.linspace(-width, width, min(2 * width + 1, len(values)))\n kernel = np.exp(-(x / 5)**2)\n\n values = np.array(values)\n weights = np.ones_like(values)\n\n smoothed_values = np.convolve(values, kernel, mode='same')\n smoothed_weights = np.convolve(weights,...
[ "0.64140326", "0.602416", "0.5890475", "0.5847634", "0.58306766", "0.5785613", "0.57544583", "0.57424194", "0.57397753", "0.57333004", "0.57170975", "0.5714303", "0.5707594", "0.5699608", "0.56825364", "0.5621545", "0.5603333", "0.56010664", "0.5588229", "0.5582662", "0.55763...
0.0
-1
Visualize a timefrequency matrix.
def plot_timefrequency(z, time, f, signal=None, method="stft"): if method == "stft": figure_title = "Short-time Fourier Transform Magnitude" fig, ax = plt.subplots() for i in range(len(time)): ax.plot(f, z[:, i], label="Segment" + str(np.arange(len(time))[i] + 1)) ax.legend() ax.set_title("Signal Spectrogram") ax.set_ylabel("STFT Magnitude") ax.set_xlabel("Frequency (Hz)") elif method == "cwt": figure_title = "Continuous Wavelet Transform Magnitude" elif method == "wvd": figure_title = "Wigner Ville Distrubution Spectrogram" fig = plt.figure() plt.plot(time, signal) plt.xlabel("Time (sec)") plt.ylabel("Signal") elif method == "pwvd": figure_title = "Pseudo Wigner Ville Distribution Spectrogram" fig, ax = plt.subplots() spec = ax.pcolormesh(time, f, z, cmap=plt.get_cmap("magma"), shading="auto") plt.colorbar(spec) ax.set_title(figure_title) ax.set_ylabel("Frequency (Hz)") ax.set_xlabel("Time (sec)") return fig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_frequency(self):\n canvas = xboa.common.make_root_canvas(\"frequency vs time\")\n canvas.Draw()\n freq_list = [freq for freq in self.freq_list]\n hist, graph = xboa.common.make_root_graph(\"frequency vs time\",\n self.time_list, ...
[ "0.6815102", "0.6405556", "0.6397176", "0.6318654", "0.62567717", "0.62256616", "0.6137152", "0.6075933", "0.603835", "0.60201347", "0.6009235", "0.6006938", "0.5997316", "0.5959236", "0.5952973", "0.5915869", "0.58863264", "0.58826864", "0.5851976", "0.5786718", "0.57783747"...
0.6246299
5
return nothing but store data in self.parse_msg and self.parse_type
def parse(self, message, prefix, cmd_list): self.parse_type = "" self.parse_msg = [] for i in message: if i[0].isdigit(): self.parse_number(i, "w") elif len(i) == 1: self.parse_type += "w" self.parse_msg.append(i) elif i == "@everyone" or i == "@here": self.parse_type += "s" self.parse_msg.append(i) elif i[0] == prefix: self.parse_command(i, cmd_list) elif i[0] == "-": self.parse_number(i, "o") elif i[0] == "<" and len(i) > 3: self.parse_mention(i) else: self.parse_type += "w" self.parse_msg.append(i[(i[0] == "\\"):])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(self, data):\n raise NotImplementedError", "def parse(cls, data):\n raise NotImplementedError", "def _parse(self):\n pass", "def parse_data(self):\n\t\traise NotImplementedError('%s: No parse function implemented!' % self.name)", "def parse(self, message: Message):\n\t\tpass"...
[ "0.7193137", "0.6979878", "0.6934807", "0.6921208", "0.670192", "0.66826284", "0.66826284", "0.66826284", "0.66826284", "0.66752", "0.66359085", "0.6599634", "0.6561199", "0.6499567", "0.6447754", "0.6330373", "0.6247721", "0.62246966", "0.61490583", "0.61253613", "0.61129683...
0.58469677
49
return elements in the message with given parameters match is the type of elements you want to get (check the parse_type variable to see possibilities) using ! at start of match will reverse the value of positive occurences will create the nth indexes elements to capture None will find everything
def finder(self, match="w", occurences=None, start=None, stop=None, trigger=True, positive=True, reverse=False, keep_prefix=False): res = [] length = len(self.parse_type) if occurences != None: occurences = str(occurences) index_array = self.indexes(occurences, 1) is_capturing = (start == None) target = 0 if match == None: match = "xwoifmrcs" if len(match) > 0 and match[0] == "!": positive = (positive == False) for idx in range(length*reverse-reverse, length*(-reverse+1)-reverse, (-reverse)*2+1): #xd lol if is_capturing == False: if type(start) == type(0): is_capturing = (idx == start) else: is_capturing = (self.parse_type[idx] in start) if stop != None: if trigger == True or is_capturing == True: if type(stop) == type(0) and (idx == stop): break if type(stop) == " " and (self.parse_type[idx] in stop): break if is_capturing == True: if (self.parse_type[idx] in match) == positive: if target in index_array: res.append(self.parse_msg[idx][(keep_prefix == False and self.parse_type[idx] in "ox"):]) target += 1 if len(res) == 0: return None return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _any_depth_parse(match):\n markers = [match.p1, match.p2, match.p3, match.p4, match.p5, match.p6]\n for idx in (4, 5):\n if markers[idx]:\n markers[idx] = mtypes.emphasize(markers[idx])\n return [m for m in markers if m]", "def onNameType(self, match):\n\t\treturn [self.process(mat...
[ "0.5471574", "0.52397555", "0.5145322", "0.5005529", "0.49990287", "0.49739963", "0.49577978", "0.49357885", "0.49001318", "0.48870462", "0.48827666", "0.4872868", "0.48422822", "0.48398778", "0.48214757", "0.4820572", "0.48020837", "0.47951323", "0.4777706", "0.4765019", "0....
0.5920306
0
return True if parameters does match the parse_type match is the amount of each parse_type elements you want to search. You can write www to check 3 words in a row ranges follow the same syntax as occurences except it targets indexes
def checker(self, match="xw", ranges="0,1", in_a_row=True, reverse=False): res = [] length = len(self.parse_type) if ranges != None: ranges = str(ranges) index_array = self.indexes(ranges) substring = "" for idx in range(length*reverse-reverse, length*(-reverse+1)-reverse, (-reverse)*2+1): #xd lol if idx in index_array: substring += self.parse_type[idx] if in_a_row == True: return (match in substring) if in_a_row == False: target = 0 for i in substring: target += (match[target] == i) return (target == maxi) if in_a_row == None: for i in self.parse_type: if i in match: match = match.replace(i, '', 1) return (match == "") return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _match(self, *token_types):\n for token in token_types:\n if self._check(token):\n self._advance()\n return True\n\n return False", "def _multiindex_row_in(cls, row, parse_list, start=None, stop=None):\n\n row_sub = r...
[ "0.65751725", "0.56603086", "0.557421", "0.5525039", "0.54756975", "0.5400486", "0.53058875", "0.52834386", "0.5266304", "0.5260166", "0.5238574", "0.5214264", "0.5185073", "0.51797396", "0.51623565", "0.5152065", "0.5134428", "0.5108154", "0.5108154", "0.51043475", "0.509991...
0.62283635
1
Run the unit tests.
def test(): import unittest tests = unittest.TestLoader().discover('tests') unittest.TextTestRunner(verbosity=2).run(tests)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runTests(self):\n \n pass", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def main():\n run_test_all()", "def test():\n import unittest\n tests = unittest.TestLoader().dis...
[ "0.85183024", "0.82517105", "0.81414855", "0.8076668", "0.8069847", "0.80517304", "0.8050559", "0.801922", "0.7989364", "0.7988354", "0.79849315", "0.7968269", "0.7955071", "0.7955071", "0.79471713", "0.79471713", "0.79471713", "0.7850217", "0.7809605", "0.7801223", "0.777470...
0.7952072
22
Processor pool entry point. Responsible for processing and writing one event from the message bus
def process_event(event_processor, event): log = logging.getLogger(__name__) try: log.debug("Processing Event: %s - %s" % (event["content_type"], event["routing_key"])) event_processor.process_event(event["content_type"], event["routing_key"], event["body"]) except: log.exception("Unable to process event")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, event):\n pass", "def event_queue_proc(self,event):\r\n event()", "def process_event(self, event):\r\n pass", "def process_amqp_events(self):\n self.connection.process_data_events()", "def ProcessEvents(self):\n self.work_queue.put(self.__ProcessEventsAsync)...
[ "0.69330835", "0.6680876", "0.654648", "0.64165616", "0.6389811", "0.6374216", "0.6304277", "0.628056", "0.6252483", "0.6216612", "0.6205624", "0.6205508", "0.61948574", "0.61800677", "0.61060655", "0.6080079", "0.60410905", "0.6012227", "0.6012227", "0.6012227", "0.6008976",...
0.62044877
12
Determine which EDR events should be sent to forwarder
def on_starting(self): self.set_capture_events_from_config()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_capture_events_from_config(self):\n\n event_config = [\n {\n \"config_key\": \"events_watchlist\",\n \"events\": [\n \"watchlist.hit.process\",\n \"watchlist.hit.binary\",\n \"watchlist.storage.hit.proc...
[ "0.61217207", "0.55249405", "0.55249405", "0.5509148", "0.54260296", "0.53321046", "0.52999175", "0.52999175", "0.5238973", "0.5216273", "0.51892865", "0.51738495", "0.5165304", "0.51611227", "0.5153233", "0.51480025", "0.5140899", "0.51364225", "0.5098679", "0.50892186", "0....
0.0
-1
Prepare to process RabbitMQ events and start consuming
def run(self): self.debug = self.forwarder_options.get("debug", "0") != "0" if self.debug: self.logger.setLevel(logging.DEBUG) processor_count = int(self.forwarder_options.get("message_processor_count", 1)) cpu_count = multiprocessing.cpu_count() if processor_count > cpu_count: self.logger.info("processor_count (%s) > cpu_count. Defaulting to cpu_count", (processor_count, cpu_count)) processor_count = cpu_count self.event_processor = EventProcessor(self.forwarder_options) self.processor_pool = multiprocessing.Pool(processor_count) while True: try: self.consume_message_bus(test=self.testing) except Exception as e: self.retry_attempts += 1 if self.retry_attempts > self.max_retry_attempts: self.logger.critical("Too many attempts to reconnect (%d). Exiting now." % self.max_retry_attempts) break if isinstance(e, pika.exceptions.AMQPConnectionError) or isinstance(e, pika.exceptions.ConnectionClosed): self.logger.error("Connection is closed or refused, retrying in %s seconds" % self.retry_interval) else: self.logger.exception("An unexpected error occurred, retrying in %s seconds" % self.retry_interval) if self.connection is not None: self.connection.close() self.connection = None time.sleep(self.retry_interval)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_amqp_events(self):\n self.connection.process_data_events()", "def consume():\n with conn.channel() as chan:\n \n def on_msg_recv(msg):\n \"\"\" Called when message arrives from RabbitMQ\n \"\"\"\n print \"processor|%s::Received message: %s\" % ...
[ "0.7568344", "0.67390686", "0.6702978", "0.6613848", "0.65698355", "0.6562793", "0.65285885", "0.6489923", "0.6489923", "0.6489923", "0.64743716", "0.64450294", "0.63913554", "0.63741064", "0.62963545", "0.62910515", "0.6283345", "0.62690264", "0.6255861", "0.62298816", "0.62...
0.62451327
19
Close any open resources
def on_stopping(self): self.logger.info("Got a shutdown of service") try: if self.connection is not None: self.connection.close() self.connection = None if self.processor_pool is not None: self.processor_pool.close() self.processor_pool.join() self.debug = self.forwarder_options.get("debug", "0") != "0" if self.debug: self.logger.setLevel(logging.DEBUG) except: self.logger.exception("Error stopping service")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close():", "def finalize(self):\n self.ratings.close()\n self.users.close()\n self.movies.close()", "def close(self):\n for lrms in self.resources.itervalues():\n lrms.close()", "def do_close(self):\n self.cleanup(True)\n self.close()", "def close( s...
[ "0.8018784", "0.78392184", "0.77499765", "0.7615301", "0.7552293", "0.75487936", "0.7532097", "0.75203544", "0.7515063", "0.7499064", "0.7497777", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781", "0.7487781",...
0.0
-1
Subscribe to the EDR event bus and begin consuming messages
def consume_message_bus(self, test=False): if test: from test_fake_bus import FakeChannel, FakeConnection self.logger.info("Running Test Message Bus") self.channel = FakeChannel(self.on_bus_message, self.forwarder_options, self.logger) self.connection = FakeConnection() return username, password = self.get_bus_credentials() credentials = pika.PlainCredentials(username, password) parameters = pika.ConnectionParameters(self.cb_server_hostname, 5004, "/", credentials) self.connection = pika.SelectConnection(parameters, self.bus_on_connected, on_close_callback=self.bus_on_closed) self.logger.info("Starting bus connection") self.retry_attempts = 0 self.connection.ioloop.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(self):\n if hasattr(self.bus, \"signal_handler\"):\n self.bus.signal_handler.subscribe()\n if hasattr(self.bus, \"console_control_handler\"):\n self.bus.console_control_handle...
[ "0.65859115", "0.65859115", "0.65859115", "0.6490624", "0.63868344", "0.6325292", "0.6266391", "0.62001973", "0.6125916", "0.60468745", "0.6021934", "0.6003453", "0.59971917", "0.5954995", "0.59471166", "0.59357494", "0.5932107", "0.59087026", "0.58901316", "0.5858419", "0.58...
0.0
-1
Callback that gets called for any event on the EDR event bus
def on_bus_message(self, channel, method_frame, header_frame, body): try: # there are two messages that get broadcast that we really # don"t care about. They have to do with feed synchronization # and other internal book-keeping if method_frame.routing_key in self.capture_events: event = { "content_type": header_frame.content_type, "routing_key": method_frame.routing_key, "body": body } self.logger.debug("Received Message: %s - %s" % (header_frame.content_type, method_frame.routing_key)) self.processor_pool.apply_async(process_event, (self.event_processor, event)) else: self.logger.debug("Unknown message info: %s" % method_frame.routing_key) except: self.logger.exception("Error processing bus message")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def event_in_cb(self, msg):\n self.event = msg.data", "def process_event(self, event):\r\n pass", "def visit_event(self, event):", "def handleEvents(self, events):\n pass", "def on_event(self, event):\n pass", "def on_event(self, event):\r\n pass", "def event_receive(...
[ "0.6813228", "0.67946595", "0.6717539", "0.6711378", "0.6636821", "0.6603972", "0.6455036", "0.6390352", "0.63873655", "0.6378598", "0.6359191", "0.63422745", "0.6334636", "0.6332409", "0.6296997", "0.6277028", "0.627466", "0.62578577", "0.62556595", "0.6215644", "0.61823285"...
0.0
-1
Retrieve which events to capture from the config
def set_capture_events_from_config(self): event_config = [ { "config_key": "events_watchlist", "events": [ "watchlist.hit.process", "watchlist.hit.binary", "watchlist.storage.hit.process", "watchlist.storage.hit.binary" ], "options": self.forwarder_options.get("wlhitnotifenabled", "0") }, { "config_key": "events_feed", "events": [ "feed.ingress.hit.process", "feed.ingress.hit.binary", "feed.ingress.hit.host", "feed.storage.hit.process", "feed.storage.hit.binary", "feed.query.hit.process", "feed.query.hit.binary" ], "options": self.forwarder_options.get("feedhitnotif", "0") }, { "config_key": "events_alert", "events": [ "alert.watchlist.hit.ingress.process", "alert.watchlist.hit.ingress.binary", "alert.watchlist.hit.ingress.host", "alert.watchlist.hit.query.process", "alert.watchlist.hit.query.binary" ], "options": self.forwarder_options.get("alertnotifenabled", "0") }, { "config_key": "events_raw_sensor", "events": [ "ingress.event.process", "ingress.event.procstart", "ingress.event.netconn", "ingress.event.procend", "ingress.event.childproc", "ingress.event.moduleload", "ingress.event.module", "ingress.event.filemod", "ingress.event.regmod" "ingress.event.tamper", "ingress.event.crossprocopen", "ingress.event.remotethread", "ingress.event.processblock", "ingress.event.emetmitigation", ], "options": self.forwarder_options.get("rawsensnotifenabled", "0") }, { "config_key": "events_binary_observed", "events": ["binaryinfo.host.observed", "binaryinfo.observed," "binaryinfo.group.observed"], "options": self.forwarder_options.get("binobsnotifenabled", "0") }, { "config_key": "events_binary_upload", "events": ["binarystore.file.added"], "options": self.forwarder_options.get("binuplnotifenabled", "0") } ] self.capture_events = [] for event_type in event_config: events = self.forwarder_options.get(event_type["config_key"], "0").lower() if events == "all": self.capture_events.extend(event_type["events"]) elif events != "0": events_from_config = events.split(",") 
events_to_capture = list(set(events_from_config) & set(event_type["events"])) self.capture_events.extend(events_to_capture) self.logger.info("Configured to capture events: %s" % self.capture_events)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedErro...
[ "0.7154731", "0.7154731", "0.6736886", "0.6736886", "0.6634429", "0.6563874", "0.6368687", "0.6339225", "0.6153467", "0.6147387", "0.6090058", "0.60785455", "0.6055919", "0.6055919", "0.6025618", "0.60202944", "0.60145056", "0.60046005", "0.59880394", "0.5945153", "0.5929929"...
0.7547521
0
the id of the Condition
def __init__(self): self.id = None self.typeInfo['id'] = 'string' """the owner of the Condition.""" self.account = None self.typeInfo['account'] = 'string' """Details of the Counter.""" self.counter = None self.typeInfo['counter'] = 'list' """the domain name of the owner.""" self.domain = None self.typeInfo['domain'] = 'string' """the domain id of the Condition owner""" self.domainid = None self.typeInfo['domainid'] = 'string' """the project name of the Condition""" self.project = None self.typeInfo['project'] = 'string' """the project id of the Condition.""" self.projectid = None self.typeInfo['projectid'] = 'string' """Relational Operator to be used with threshold.""" self.relationaloperator = None self.typeInfo['relationaloperator'] = 'string' """Threshold Value for the counter.""" self.threshold = None self.typeInfo['threshold'] = 'long' """zone id of counter""" self.zoneid = None self.typeInfo['zoneid'] = 'string'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getId(self):", "def condition_id(self, condition_id):\n\n self._condition_id = condition_id", "def id(self):\n return self.status.id", "def getID():", "def _id(self):\n pass", "def get_id(self):\n return \"required_modules_exists_but_condition_is_false_plugin\"", "def ch...
[ "0.615068", "0.60920733", "0.60874665", "0.60557884", "0.5979219", "0.5969927", "0.5945815", "0.593884", "0.59332013", "0.589888", "0.5893271", "0.58621705", "0.58531296", "0.58531296", "0.58335763", "0.57970726", "0.57366246", "0.57366246", "0.57366246", "0.57366246", "0.571...
0.0
-1
Constructs a request and sends it to the endpoint to create a custom job instance.
def Create(self,
           parent,
           specs=None,
           config_path=None,
           display_name=None,
           python_package_uri=None,
           args=None,
           command=None,
           kms_key_name=None,
           network=None,
           service_account=None):
  """Constructs a CustomJob request and sends it to the AI Platform endpoint.

  Args:
    parent: str, resource name of the location to create the job in.
    specs: iterable of dicts, one worker pool per entry; recognised keys are
      'machine-type', 'replica-count', 'container-image-uri',
      'python-image-uri' and 'python-module'.
    config_path: str, path to a YAML file describing a CustomJobSpec.
    display_name: str, display name for the job.
    python_package_uri: list of str, GCS URIs of the Python packages.
    args: list of str, args passed to the container / python package.
    command: list of str, container entrypoint override.
    kms_key_name: str, CMEK key name; sets the job's encryptionSpec.
    network: str, network the job runs in.
    service_account: str, service account the job runs as.

  Returns:
    The response of the service's Create call (the created CustomJob).
  """
  if not python_package_uri:
    python_package_uri = []

  job_spec = self.messages.GoogleCloudAiplatformV1beta1CustomJobSpec()
  job_spec.network = network
  job_spec.serviceAccount = service_account
  if config_path:
    data = yaml.load_path(config_path)
    if data:
      # NOTE(review): this replaces job_spec wholesale, discarding the
      # network/serviceAccount values set just above — confirm the config
      # file is meant to take full precedence over those flags.
      job_spec = messages_util.DictToMessageWithErrorCheck(
          data, self.messages.GoogleCloudAiplatformV1beta1CustomJobSpec)

  worker_pool_specs = []
  if specs is not None:
    for spec in specs:
      machine_type = spec.get('machine-type')
      # Default to a single replica when 'replica-count' is absent/empty/0.
      if not spec.get('replica-count'):
        replica_count = 1
      else:
        replica_count = int(spec.get('replica-count'))
      container_image_uri = spec.get('container-image-uri')
      python_image_uri = spec.get('python-image-uri')
      python_module = spec.get('python-module')
      machine_spec = (
          self.messages.GoogleCloudAiplatformV1beta1MachineSpec(
              machineType=machine_type))
      worker_pool_spec = (
          self.messages.GoogleCloudAiplatformV1beta1WorkerPoolSpec(
              replicaCount=replica_count, machineSpec=machine_spec))
      # Container-based worker pool: args/command only apply here.
      if container_image_uri:
        worker_pool_spec.containerSpec = (
            self.messages.GoogleCloudAiplatformV1beta1ContainerSpec(
                imageUri=container_image_uri))
        if args is not None:
          worker_pool_spec.containerSpec.args = args
        if command is not None:
          worker_pool_spec.containerSpec.command = command
      # Python-package-based worker pool (may coexist with containerSpec).
      if python_package_uri or python_image_uri or python_module:
        worker_pool_spec.pythonPackageSpec = (
            self.messages.GoogleCloudAiplatformV1beta1PythonPackageSpec(
                executorImageUri=python_image_uri,
                packageUris=python_package_uri,
                pythonModule=python_module))
        if args is not None:
          worker_pool_spec.pythonPackageSpec.args = args
      worker_pool_specs.append(worker_pool_spec)

  # Worker pools built from flags override any pools from the config file.
  if worker_pool_specs:
    job_spec.workerPoolSpecs = worker_pool_specs
    validation.ValidateWorkerPoolSpec(job_spec.workerPoolSpecs)

  custom_job = (
      self.messages.GoogleCloudAiplatformV1beta1CustomJob(
          displayName=display_name, jobSpec=job_spec))
  if kms_key_name is not None:
    custom_job.encryptionSpec = self.messages.GoogleCloudAiplatformV1beta1EncryptionSpec(
        kmsKeyName=kms_key_name)

  return self._service.Create(
      self.messages.AiplatformProjectsLocationsCustomJobsCreateRequest(
          parent=parent, googleCloudAiplatformV1beta1CustomJob=custom_job))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request_factory(self) -> 'JSONRPCRequest':\n return JSONRPCRequest()", "def post(self):\n data, errors = JobSchema().loads(request.data)\n\n if errors:\n return Response().send(\n data=None, status=400, code=\"bad_request\", message=errors\n )\n ...
[ "0.65077955", "0.6448152", "0.64046395", "0.63458514", "0.6320473", "0.63068354", "0.628952", "0.62621844", "0.6166124", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "0.61317235", "...
0.0
-1
Returns a function to decide if log fetcher should continue polling.
def CheckJobComplete(self, name): request = self.messages.AiplatformProjectsLocationsCustomJobsGetRequest( name=name) response = self._service.Get(request) def ShouldContinue(periods_without_logs): if periods_without_logs <= 1: return True return response.endTime is None return ShouldContinue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _do_request(self):\n\n if time.time() < self._next_request:\n return False\n else:\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n ...
[ "0.6179781", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.6099804", "0.60813695", "0.6056094", "0.6056094", "0.6056094", "0.6056094", ...
0.0
-1
Implementation of TPMINVNOM00000 Step 1.1
def upload(version=minv.__version__, release="1"): version = version or minv.__version__ put( join( env.builder_path, "build/RPMS/minv-%s-%s.noarch.rpm" % (version, release) ), "" ) put("minv/package/minv_install_postgresql.sh", "") sudo("chmod a+x minv_install_postgresql.sh") with lcd(env.ink_path): for rpm in RPMS: put(rpm, "")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTru(n,int_method,func) :\n m = np.asarray([0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10])\n bes = np.asarray([-0.5,0,0.43675,0.47773,0.49231,0.49316,0.49280,0.50325,\n 0.51140,0.52169,0.55823,0.58086,0.60463,0.61483,0.66995])\n p = np.asarray([1,0,0.61007,0.77491,0.84071,0.87...
[ "0.5948996", "0.5655197", "0.5646285", "0.5632018", "0.5629894", "0.5600689", "0.55927324", "0.54705757", "0.54594123", "0.5431765", "0.5404841", "0.5401604", "0.5367439", "0.53368443", "0.5317328", "0.53077716", "0.52990746", "0.52621627", "0.52585423", "0.52466846", "0.5245...
0.0
-1
Implementation of TPMINVNOM00000 Step 1.2
def install(version=minv.__version__, release="1"): sudo("yum install -y %s" % " ".join(RPMS)) sudo("yum install -y minv-%s-%s.noarch.rpm" % (version, release)) sudo( 'printf "abcdefghijklmnopq\nabcdefghijklmnopq" ' '| sh minv_install_postgresql.sh --tablespace /disk/minv_tablespace/' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTru(n,int_method,func) :\n m = np.asarray([0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10])\n bes = np.asarray([-0.5,0,0.43675,0.47773,0.49231,0.49316,0.49280,0.50325,\n 0.51140,0.52169,0.55823,0.58086,0.60463,0.61483,0.66995])\n p = np.asarray([1,0,0.61007,0.77491,0.84071,0.87...
[ "0.59679216", "0.5714044", "0.5704294", "0.5693948", "0.56368655", "0.5636519", "0.558531", "0.55831385", "0.5476309", "0.545802", "0.54322004", "0.541791", "0.5415495", "0.5400221", "0.53617483", "0.5336507", "0.5317923", "0.52990615", "0.529753", "0.5280693", "0.52662027", ...
0.0
-1
Implementation of TPMINVNOM00000 Step 1.3
def config(): sudo( r"sed -i '/#password=/c\password=abcdefghijklmnopq' /etc/minv/minv.conf" ) sudo( r"sed -i '/log_level = INFO/c\log_level = DEBUG' /etc/minv/minv.conf" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTru(n,int_method,func) :\n m = np.asarray([0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10])\n bes = np.asarray([-0.5,0,0.43675,0.47773,0.49231,0.49316,0.49280,0.50325,\n 0.51140,0.52169,0.55823,0.58086,0.60463,0.61483,0.66995])\n p = np.asarray([1,0,0.61007,0.77491,0.84071,0.87...
[ "0.6024059", "0.58742326", "0.5747745", "0.57027227", "0.5643047", "0.5618062", "0.5584847", "0.5547953", "0.5515293", "0.54348683", "0.5426006", "0.5425438", "0.5389946", "0.53414834", "0.532526", "0.5292617", "0.52875394", "0.5270108", "0.52518106", "0.5248011", "0.52446", ...
0.0
-1
Implementation of TPMINVNOM00000 Step 1.4
def setup(): sudo("minv_setup.sh")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTru(n,int_method,func) :\n m = np.asarray([0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10])\n bes = np.asarray([-0.5,0,0.43675,0.47773,0.49231,0.49316,0.49280,0.50325,\n 0.51140,0.52169,0.55823,0.58086,0.60463,0.61483,0.66995])\n p = np.asarray([1,0,0.61007,0.77491,0.84071,0.87...
[ "0.5975354", "0.574612", "0.57286537", "0.5664337", "0.5646467", "0.5617162", "0.55988216", "0.55463326", "0.5473999", "0.5429033", "0.54245466", "0.54071516", "0.5387508", "0.5321748", "0.5301005", "0.5291172", "0.52895296", "0.5283824", "0.52834505", "0.52806515", "0.527926...
0.0
-1
Implementation of TPMINVNOM00000 Step 1.5
def run_services(): for service in ("minvd", "httpd", "ntpd"): sudo("service %s start" % service) sudo("chkconfig %s on" % service)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTru(n,int_method,func) :\n m = np.asarray([0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10])\n bes = np.asarray([-0.5,0,0.43675,0.47773,0.49231,0.49316,0.49280,0.50325,\n 0.51140,0.52169,0.55823,0.58086,0.60463,0.61483,0.66995])\n p = np.asarray([1,0,0.61007,0.77491,0.84071,0.87...
[ "0.60359097", "0.57987505", "0.57006055", "0.56011873", "0.55925214", "0.5585051", "0.55665624", "0.55553573", "0.55482876", "0.5474939", "0.5453688", "0.5451362", "0.5410539", "0.540798", "0.54051346", "0.53952694", "0.5374782", "0.5306764", "0.529368", "0.5291805", "0.52669...
0.0
-1
Implementation of TPMINVNOM00001 Steps 2 4
def initialize(): with settings(prompts={'Password: ': 'test', 'Password (again): ': 'test'}): for user, group in USER_GROUPS: sudo("useradd %s -G %s,minv -g minv -N || true" % (user, group)) sudo("chmod g+rwx /home/%s" % user) sudo('minv_ createuser %s -g %s' % (user, group), user="minv") # upload script to create collections put( join(env.testdata_path, "scripts/initial_collections.sh"), "", mode=0755 ) sudo("cp initial_collections.sh /home/minv-app-administrator/") # upload collection configs for conf in glob(join(env.testdata_path, "configurations/*.conf")): put(conf, "", mode=0444, use_sudo=True) sudo("cp %s /home/minv-app-administrator/" % basename(conf)) with cd("/home/minv-app-administrator/"): sudo("chmod a+rx . *") sudo( "sh -l ./initial_collections.sh", user="minv-app-administrator" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTru(n,int_method,func) :\n m = np.asarray([0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10])\n bes = np.asarray([-0.5,0,0.43675,0.47773,0.49231,0.49316,0.49280,0.50325,\n 0.51140,0.52169,0.55823,0.58086,0.60463,0.61483,0.66995])\n p = np.asarray([1,0,0.61007,0.77491,0.84071,0.87...
[ "0.5861715", "0.5764415", "0.5698264", "0.568726", "0.56142557", "0.55775875", "0.54205596", "0.54187477", "0.54100156", "0.5328907", "0.529744", "0.52744836", "0.5273768", "0.526851", "0.5267105", "0.52515525", "0.52438647", "0.5238269", "0.5230068", "0.5224255", "0.52228403...
0.0
-1
Compares an image to its reference
def compare(self, reference, image): if not os.path.isfile(reference): raise PictureComparatorError("Reference file %s does not exist" % reference) if not os.path.isfile(image): raise PictureComparatorError("Image file %s does not exist" % image) reference_img = cv2.imread(reference, 0) image_img = cv2.imread(image, 0) reference_width, reference_height = reference_img.shape[::-1] image_width, image_height = image_img.shape[::-1] if reference_width < image_width or reference_height < image_height: raise PictureComparatorError("Reference picture must be greater than image to find") method = cv2.TM_CCOEFF_NORMED # Apply template Matching res = cv2.matchTemplate(reference_img, image_img, method) min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res) if max_val > 0.95: return Rectangle(max_loc[0], max_loc[1], image_width, image_height) else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compareTo(self,imagefullpath):\n exc = ExtractColor2(self.k)\n bgrcolor = exc.getColorBGR(imagefullpath)\n\n score = 0\n for i in range(self.k):\n score += np.linalg.norm(bgrcolor[i] - self._ref_BGRcolor[i])/(np.sqrt(255*255*3))\n score /= self.k\n return 1 ...
[ "0.7269888", "0.69743997", "0.6905206", "0.6846669", "0.67375135", "0.6730658", "0.67301905", "0.67132235", "0.6670823", "0.66276044", "0.65917426", "0.6564198", "0.65512747", "0.6517447", "0.65027654", "0.6423081", "0.64036614", "0.63828444", "0.6322787", "0.6311486", "0.628...
0.7576849
0
From a matrix of difference pixels (for each pixel, we have 0 if the pixel is the same, or a non-zero value if they are different), creates: a list of pixels which are different, and a PNG image of the same size as the 'step' image, where each different pixel is coloured RED
def _build_list_of_changed_pixels(self, diff, image_width, image_height, min_width, min_height, exclude_zones): # complete diff "image" to the size of step image diff = numpy.pad(diff, ((0, max(0, image_height - min_height)), (0, max(0, image_width - min_width))), constant_values=1) # ignore excluded pixels diff *= self._build_list_of_excluded_pixels2(exclude_zones, image_width, image_height) # draw mask of differences mask = numpy.ones((image_height, image_width, 1), dtype=uint8) diff_image = numpy.zeros((image_height, image_width, 4), dtype=uint8) cnd = diff[:,:] > 0 # says which pixels are non-zeros diff_image[cnd] = mask[cnd] diff_image *= numpy.array([0, 0, 255, 255], dtype=uint8) # print red pixels diff_pixels = numpy.transpose(diff.nonzero()); return diff_pixels, diff_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def separate_colors(self):\n colors = self.get_sorted_pixels()\n colors_dict = dict((val[1], Image.new('RGB', self.size, (255,255,255))) \n for val in colors)\n pixel_dict = dict((img, []) for img in colors_dict.keys())\n\n pix = self.image.load()\n for...
[ "0.594339", "0.5888286", "0.5828619", "0.5812646", "0.5808996", "0.5784285", "0.57349354", "0.57313955", "0.5709643", "0.56814396", "0.566322", "0.56176597", "0.55183667", "0.5507987", "0.54847234", "0.54687953", "0.5410043", "0.53843975", "0.53818494", "0.53818494", "0.53743...
0.7071252
0
From the list of rectangles, build a list of pixels that these rectangles cover
def _build_list_of_excluded_pixels2(self, exclude_zones, img_width, img_height): full_image = numpy.ones((img_height, img_width), dtype=uint8) for x, y, width, height in exclude_zones: # creates a matrix where 0 is placed on pixels to exclude, and 1 on pixel to keep exclusion = numpy.zeros((height, width), dtype=uint8) exclusion = numpy.pad(exclusion, ((min(y, img_height) , max(0, img_height - (y + height))), (min(x, img_width), max(0, img_width - (x + width)))), constant_values=1) full_image *= exclusion[0:img_height, 0:img_width] # crop exclusion array if it's size is higher than image (exclusion zone outside of image dimensions) return full_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rect(rows: int, cols: int, top: int = 0,\n left: int = 0) -> List['GridQubit']:\n return [\n GridQubit(row, col)\n for row in range(top, top + rows)\n for col in range(left, left + cols)\n ]", "def find_rects(image: np.ndarray) -> List[np.ndarray]:\n...
[ "0.6603085", "0.65592986", "0.65554035", "0.6548141", "0.65277016", "0.6455441", "0.6452907", "0.644425", "0.64078", "0.6345731", "0.63407683", "0.63396865", "0.6298158", "0.62660736", "0.6236707", "0.6198757", "0.61927736", "0.6180278", "0.616941", "0.61400336", "0.61380047"...
0.59542006
37
From the list of rectangles, build a list of pixels that these rectangles cover
def _build_list_of_excluded_pixels(self, exclude_zones): pixels = [] for x, y, width, height in exclude_zones: for row in range(height): for col in range(width): pixels.append(Pixel(col + x, row + y)) return pixels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rect(rows: int, cols: int, top: int = 0,\n left: int = 0) -> List['GridQubit']:\n return [\n GridQubit(row, col)\n for row in range(top, top + rows)\n for col in range(left, left + cols)\n ]", "def find_rects(image: np.ndarray) -> List[np.ndarray]:\n...
[ "0.66037875", "0.6559397", "0.6555009", "0.65479845", "0.6528629", "0.6455227", "0.6453855", "0.64455384", "0.64104044", "0.6346816", "0.63399655", "0.6339705", "0.62987906", "0.6266192", "0.6236844", "0.61997765", "0.6193706", "0.6171467", "0.6139797", "0.6138508", "0.612909...
0.6178429
17
Check if two things have the same type.
def same_type(one, two): return isinstance(one, type(two))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_equal_same_type(self, other):\n return True", "def is_same(type1, type2):\n nake_type1 = remove_declarated(type1)\n nake_type2 = remove_declarated(type2)\n return nake_type1 == nake_type2", "def sametype(variable1, variable2):\n\n # Return the result\n return isinstance(variable1,...
[ "0.826196", "0.7960103", "0.7685877", "0.75767535", "0.73899287", "0.7386229", "0.73627245", "0.72946095", "0.7151994", "0.7137583", "0.7062871", "0.7059909", "0.70217156", "0.69869566", "0.6975931", "0.69511235", "0.6951103", "0.682518", "0.6799043", "0.6766386", "0.67605907...
0.8348503
0
Merge data from another configuration space into this one.
def merge(one, two, overwrite=False, typecheck=True): if one is two: return if typecheck and not same_type(one, two): raise ValueError('Type mismatch') for (key, value) in two.items(): if key not in one: one[key] = value if typecheck and not same_type(one[key], value): raise ValueError('Type mismatch') if isinstance(value, dict): merge(one[key], two[key], overwrite, typecheck) elif not overwrite: continue else: one[key] = two[key]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge(self, other_config):\n # Make a copy of the current attributes in the config object.\n config_options = copy.copy(self._user_provided_options)\n\n # Merge in the user provided options from the other config\n config_options.update(other_config._user_provided_options)\n\n ...
[ "0.68907833", "0.67463917", "0.6724158", "0.6697968", "0.6695578", "0.66716415", "0.635137", "0.6323535", "0.6313957", "0.628166", "0.62575287", "0.62176895", "0.6143152", "0.611059", "0.61058307", "0.6072944", "0.60497296", "0.5980816", "0.59184533", "0.5873966", "0.5863279"...
0.0
-1
AirInstance constructor. name: the name of the instance. input: an object with the YAML description of the IR instance. transmit_handler: a function to be called to transmit packets. TODO: add support to allow the specification of the MetaIR instance.
def __init__(self, name, input, transmit_handler):
    """AirInstance constructor.

    @param name The name of the instance
    @param input An object with the YAML description of the IR instance
    @param transmit_handler A function to be called to transmit packets;
        called as transmit_handler(out_port, packet)

    @todo Add support to allow the specification of the MetaIR instance
    """
    # NOTE(review): the parameter 'input' shadows the builtin, and the
    # loops below reuse 'name', shadowing the instance-name parameter
    # (which is saved to self.name first, so behaviour is unaffected).
    local_dir = os.path.dirname(os.path.abspath(__file__))
    # The AIR meta description ships next to this module.
    MetaIRInstance.__init__(self, os.path.join(local_dir, 'air_meta.yml'))
    self.transmit_handler = transmit_handler
    self.name = name
    self.tm_started = False
    self.disabled = True

    # Add the content to the MetaIR instance
    self.add_content(input)
    self.port_count = self.meta_ir_object_map["layout"]["port_count"]

    # Create the AIR objects: parsers, actions, tables, pipelines and TMs
    self.air_value_set = {}
    self.air_value_map = {}
    self.air_parser = {}
    self.air_action = {}
    self.air_table = {}
    self.air_pipeline = {}
    self.air_traffic_manager = {}
    self.processors = {}
    self.transmit_processor = TransmitProcessor(transmit_handler)
    for name, val in self.value_set.items():
        self.air_value_set[name] = []  # Just use a list
    for name, val in self.value_map.items():
        self.air_value_map[name] = {}  # Just use a dict
    for name, val in self.parser.items():
        self.air_parser[name] = Parser(name, val, self.parse_state,
                                       self.header, self.value_set)
        self.processors[name] = self.air_parser[name]
    for name, val in self.action.items():
        self.air_action[name] = Action(name, val)
    for name, val in self.table.items():
        self.air_table[name] = Table(name, val, self.air_action)
    for name, val in self.control_flow.items():
        self.air_pipeline[name] = Pipeline(name, val, self.air_table,
                                           self.air_action)
        self.processors[name] = self.air_pipeline[name]
    for name, val in self.traffic_manager.items():
        self.air_traffic_manager[name] = SimpleQueueManager(name, val,
                                                            self.port_count)
        self.processors[name] = self.air_traffic_manager[name]

    # Plumb the layout: chain each processor to the next one in the list;
    # the last processor feeds the transmit processor.
    layout = self.meta_ir_object_map["layout"]
    meta_ir_assert(layout["format"] == "list", "Unsupported layout: not a list")
    layout_name_list = layout["implementation"]
    meta_ir_assert(isinstance(layout_name_list, list),
                   "Layout implementation is not a list")
    proc_count = len(layout_name_list)
    for idx, processor_name in enumerate(layout_name_list):
        cur_proc = self.processors[processor_name]
        if idx == 0:
            logging.debug("Layout: First processor %s" % cur_proc.name)
            self.first_processor = cur_proc
        if idx < proc_count - 1:
            next_proc = self.processors[layout_name_list[idx + 1]]
            cur_proc.next_processor = next_proc
        else:
            # Last one connects to transmit processor
            cur_proc.next_processor = self.transmit_processor
        logging.debug("Layout %s to %s" % (cur_proc.name,
                                           cur_proc.next_processor.name))

    # Grab table initialization object if present
    self.table_initialization = {}
    ext_objs = self.external_object_map
    if "table_initialization" in ext_objs.keys():
        self.table_initialization = ext_objs["table_initialization"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n add_on: Optional[pulumi.Input[pulumi.InputType['InstanceAddOnArgs']]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n bl...
[ "0.66051346", "0.6576527", "0.6427389", "0.63597023", "0.62879163", "0.6271562", "0.6235065", "0.6202321", "0.6141576", "0.6116739", "0.6116739", "0.6116739", "0.6116739", "0.6116739", "0.6116739", "0.61087185", "0.60978323", "0.6088997", "0.60772216", "0.606114", "0.6051864"...
0.7563762
0
Process any table initialization spec from the IR desc The IR specification may provide a set of table initialization operations in a "table_initialization" object. This takes the form of a sequence of table entry specifications.
def process_table_init(self): logging.debug("Processing table initialization, %d entries", len(self.table_initialization)) for init_entry in self.table_initialization: for table_name, entry_desc in init_entry.items(): self.air_table[table_name].add_entry( table_entry.description_to_entry(entry_desc))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_up_tables(self):\n tables = []\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_info',\n 'description': desc.SimInfoRow,\n 'tabletitle': 'Simulation Information'})\n tables.append({'groupname': 'metadata',\n...
[ "0.52702624", "0.51661", "0.51060444", "0.5094332", "0.50876445", "0.50695586", "0.50232565", "0.49792466", "0.49553454", "0.49386698", "0.49274278", "0.49077904", "0.4889465", "0.48876804", "0.48618603", "0.48576298", "0.48565733", "0.48539078", "0.4839813", "0.48349887", "0...
0.7456325
0
Enable the switch instance. Start the traffic manager threads and allow packets to enter the processor chain.
def enable(self): if not self.tm_started: for name, tm in self.air_traffic_manager.items(): logging.debug("Starting tm %s" % name) tm.start() tm_started = True logging.debug("Enabling switch %s" % self.name) self.disabled = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enable(self):\n self.switch.enable()\n self._enabled = True", "def start_sending_to_switch(self):\n self.switch_active = True\n for message in self.internal_switch_buffer:\n self.switch.buffer.append(message)\n self.internal_switch_buffer = []", "def launch ():...
[ "0.633651", "0.63337624", "0.627072", "0.62615603", "0.6093382", "0.6046594", "0.595381", "0.5936305", "0.58609205", "0.58298725", "0.5779756", "0.57681865", "0.5766365", "0.5766356", "0.5763502", "0.57629", "0.57612556", "0.5747415", "0.5741657", "0.57210463", "0.5721009", ...
0.7502978
0
Disable the switch instance Packets on ingress are discarded while the switch is disabled. Traffic manager threads are not stopped.
def disable(self): logging.debug("Disabling switch %s" % self.name) self.disabled = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def async_turn_off(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ...
[ "0.66462976", "0.6192181", "0.6160286", "0.6113852", "0.6101358", "0.6030463", "0.6019299", "0.60160685", "0.6007818", "0.59888124", "0.5958932", "0.59445566", "0.5917474", "0.591579", "0.5867535", "0.5856679", "0.5840998", "0.583786", "0.58215153", "0.57709235", "0.5764594",...
0.70145595
0
in_port The ingress port number on which packet arrived packet A bytearray with the packet data
def process_packet(self, in_port, packet): buf = bytearray(packet) for idx in range((len(packet) + 19)/20): logging.debug(hexify(buf[20*idx : 20*(idx+1)], 20)) if self.disabled: logging.debug("Switch is disabled; discarding packet") return parsed_packet = ParsedPacket(buf, self.metadata) logging.debug("Processing packet %d from port %d with %s" % (parsed_packet.id, in_port, self.first_processor.name)) self.first_processor.process(parsed_packet)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def port_in(self, port_in):\n\n self._port_in = port_in", "def port_in(self, port_in):\n\n self._port_in = port_in", "def _packet_in_debug(self, ev, in_port):\n #*** Extract parameters:\n msg = ev.msg\n datapath = msg.datapath\n dpid = datapath.id\n pkt = packet...
[ "0.6083645", "0.6083645", "0.6004842", "0.5940256", "0.58412826", "0.5745346", "0.56811863", "0.56805307", "0.56792486", "0.5678725", "0.55001324", "0.5463868", "0.545723", "0.54208636", "0.5390863", "0.5390862", "0.537292", "0.5355706", "0.5340329", "0.5338251", "0.5322882",...
0.5686567
6
Transmit handler template for documentation out_port The port number to which the packet is to be sent packet A bytearray object holding the packet to transmit
def dummy_transmit_handler(out_port, packet): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, out):", "def _post(self, which_port, msg):\n return _spacegrant_swig.binary_sink_sptr__post(self, which_port, msg)", "def send_traffic_data(serialport, pack):\n pack[0] = 0x01\n pack[1] = 0x00\n serialport.write(pack)\n logging.debug(\"Traffic Data - Sent.\")\n logging.deb...
[ "0.5656294", "0.5640042", "0.5389474", "0.53658545", "0.52940327", "0.52939874", "0.5285707", "0.51725745", "0.51605856", "0.5146283", "0.51232743", "0.5088026", "0.5069205", "0.50586265", "0.50550586", "0.50533634", "0.50321823", "0.5028129", "0.49887457", "0.4957033", "0.49...
0.655866
0
Process interface that sends a packet parsed_packet The packet instance to transmit
def process(self, parsed_packet): byte_buf = parsed_packet.serialize() out_port= parsed_packet.get_field("intrinsic_metadata.egress_port") logging.debug("Transmit pkt id %d to %d" % (parsed_packet.id, out_port)) buf = bytearray(byte_buf) for idx in range((len(buf) + 19)/20): logging.debug(hexify(buf[20*idx : 20*(idx+1)], 20)) self.transmit_handler(out_port, byte_buf)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, packet):\n pass", "def process(self, pkt):\n pass", "def _do_some_logic(self, packet):\n\n\n pass", "def process_packet(self, in_port, packet):\n \n buf = bytearray(packet)\n for idx in range((len(packet) + 19)/20):\n logging.debug(he...
[ "0.8039347", "0.7425329", "0.6986574", "0.69667333", "0.6850102", "0.66773987", "0.65571284", "0.655704", "0.65034556", "0.64887303", "0.64614993", "0.64393044", "0.6438453", "0.6343893", "0.633845", "0.63383657", "0.62919194", "0.62790996", "0.626932", "0.6252799", "0.625152...
0.7235204
2
Send message to specified channel.
def sendmsg(msg, target=channel): msg = bytes('PRIVMSG ' + target + ' :' + msg + '\n', 'UTF-8') sleep(randint(5, 10) / 10) # to avoid throttling due to flooding write(msg) ircsocket.send(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def send_message(self, channel : str, message : str):\n await self._connection.send_message(channel, message)", "def sendMsg(self, channel, message, length=None):\n self.logger.info(\"Sending in %s: %s\" % (channel, message))\n self.msg(channel, message, length)", "def send_message(s...
[ "0.82728493", "0.7965429", "0.7865041", "0.77663475", "0.7414901", "0.7366934", "0.7189363", "0.71749264", "0.7114904", "0.7107324", "0.7078955", "0.703922", "0.69624746", "0.6917826", "0.69085354", "0.6878344", "0.6788846", "0.67383105", "0.6678887", "0.66784793", "0.6666718...
0.0
-1
Respond to server Pings.
def ping(msg): msg = msg[0:1] + 'O' + msg[2:] ircsocket.send(bytes(msg, 'utf-8')) sendmsg('This message should be eaten by irc. QQ.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ping_response():\n\n return Response(\"ok\", status=200)", "def ping(self) -> Response:\n raise NotImplementedError", "def ping():\n\treturn HTTPResponse(status=200)", "def ping():\r\n return make_response(\"pong!\", 200)", "def ping():\n return jsonify({'response': 'pong'}), 200", "a...
[ "0.71649414", "0.7104726", "0.6999726", "0.69393915", "0.68687826", "0.67558753", "0.6743167", "0.6712012", "0.6712012", "0.6712012", "0.6712012", "0.6609778", "0.6493357", "0.6481408", "0.64656126", "0.63697636", "0.63491356", "0.634054", "0.6311246", "0.62131786", "0.620798...
0.0
-1
Wrapper around the MappingColumns class to create the list of suggested mappings
def build_column_mapping(raw_columns, dest_columns, previous_mapping=None, map_args=None, default_mappings=None, thresh=0): return MappingColumns(raw_columns, dest_columns, previous_mapping=previous_mapping, map_args=map_args, default_mappings=default_mappings, threshold=thresh).final_mappings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_column_mapping_suggestions(request):\n body = json.loads(request.body)\n import_file = ImportFile.objects.get(pk=body.get('import_file_id'))\n org_id = body.get('org_id')\n result = {'status': 'success'}\n # Make a dictionary of the column names and their respective types.\n # Build this ...
[ "0.6701818", "0.66194874", "0.64661616", "0.6289916", "0.62860745", "0.6266867", "0.6198274", "0.6167986", "0.6022413", "0.5931042", "0.5810462", "0.5776295", "0.5758414", "0.5749279", "0.57203007", "0.5662926", "0.5634716", "0.5623659", "0.55996656", "0.5535574", "0.5528489"...
0.63081527
3
Set any attributes that are passed in as initial data.
def apply_initial_data(model, initial_data): for item in initial_data: value = initial_data[item] if hasattr(model, item): setattr(model, item, value) elif hasattr(model, 'extra_data') and isinstance(model.extra_data, dict): model.extra_data[item] = value return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_attributes(self):", "def __init__(self, **initial_attributes):\n\n for attribute_name, attribute_value in initial_attributes.items():\n setattr(self, attribute_name, attribute_value)", "def init_attrs(self):\n raise NotImplementedError", "def __init__(self, **attributes):\n ...
[ "0.79411536", "0.7760498", "0.74538267", "0.73853767", "0.7362866", "0.71887094", "0.7146371", "0.7146371", "0.7146371", "0.7098289", "0.70153654", "0.699478", "0.69330215", "0.68741333", "0.68410134", "0.6835195", "0.673314", "0.67293113", "0.66948265", "0.6694178", "0.66822...
0.6589023
28
Concatenate the values into one string to set for target.
def _concat_values(concat_columns, column_values, delimiter): # Use the order of values that we got from concat_columns def. values = [ column_values[item] for item in concat_columns if item in column_values ] return delimiter.join(values) or None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def concat(values, sep=', '):\n concat_str = None\n try:\n concat_str = sep.join([str(v) for v in values if not is_empty(v)])\n except Exception as e:\n pass\n return concat_str", "def joined_parameter(*values: str) -> str:\n return \"+\".join(values)", "def _concat(self, *args, **...
[ "0.66425836", "0.64691097", "0.63475597", "0.6099031", "0.60475785", "0.60406435", "0.604041", "0.60189885", "0.6017926", "0.5998727", "0.5937355", "0.5918181", "0.5915184", "0.5901235", "0.5898622", "0.58785903", "0.5799823", "0.5796432", "0.5734698", "0.5730965", "0.5715827...
0.54848677
46
Set the column value as the target attr on our model.
def apply_column_value(raw_column_name, column_value, model, mapping, is_extra_data, cleaner): # If the item is the extra_data column, then make sure to save it to the # extra_data field of the database if raw_column_name in mapping: table_name, mapped_column_name, display_name, is_extra_data = mapping.get(raw_column_name) # special postal case: if mapped_column_name in ['postal_code', 'owner_postal_code']: if '-' in str(column_value): postal = str(column_value).split('-')[0].zfill(5) ext = str(column_value).split('-')[1].zfill(4) column_value = postal + '-' + ext column_value = str(column_value).zfill(5) cleaned_value = None if cleaner: # Get the list of Quantity fields from the Column object in SEED. This is non-ideal, since the # rest of the mapping code does not use SEED models. Perhaps make this an argument. if (model.__class__.__name__, mapped_column_name) in apps.get_model('seed', 'Column').QUANTITY_UNIT_COLUMNS: # clean against the database type first cleaned_value = cleaner.clean_value(column_value, mapped_column_name, is_extra_data) # This is a temporary fix for when the raw_column_name and the mapped_column_name # are the same. It causes the units to be cast twice since the cleaner look up finds # the same column twice. The cleaner needs to be cleaned up quite a bit to handle # this error correctly. 
if mapped_column_name != raw_column_name: # now clean against the raw name with pint (Quantity Units) because that's the column # that holds the units needed to interpret the value correctly cleaned_value = cleaner.clean_value(cleaned_value, raw_column_name, is_extra_data) else: cleaned_value = cleaner.clean_value(column_value, mapped_column_name, is_extra_data) else: cleaned_value = default_cleaner(column_value) if is_extra_data: if hasattr(model, 'extra_data'): # only save it if the model and the mapping are the same if model.__class__.__name__ == table_name: if isinstance(cleaned_value, (datetime, date)): # TODO: create an encoder for datetime once we are in Django 1.11 model.extra_data[mapped_column_name] = cleaned_value.isoformat() else: model.extra_data[mapped_column_name] = cleaned_value else: # Simply set the field to the cleaned value if it is the correct model if model.__class__.__name__ == table_name: setattr(model, mapped_column_name, cleaned_value) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_attribute(self, name, value):\n attrs = self._column.attrs\n attrs[name] = value\n self._column.attrs = attrs", "def __set__(self, instance, value):\r\n if instance:\r\n return instance._values[self.column.column_name].setval(value)\r\n else:\r\n r...
[ "0.7223488", "0.69311714", "0.6659743", "0.65025675", "0.64680314", "0.64388746", "0.64149034", "0.63883615", "0.6383518", "0.63276327", "0.6326977", "0.6314786", "0.62230754", "0.62147987", "0.61330974", "0.6095479", "0.6058556", "0.6057366", "0.6056918", "0.6027524", "0.602...
0.0
-1
Go through the list of dictionaries and setup their keys.
def _set_default_concat_config(concat): concat = concat or [] if not isinstance(concat, list): concat = [concat] for c in concat: c['target'] = c.get('target', '__broken_target__') c['concat_columns'] = c.get('concat_columns', []) c['delimiter'] = c.get('delimiter', ' ') c['concat_values'] = {} return concat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def example_dict_from_dict_list(dict_list,recursive=False):\n if not isinstance(dict_list,list):\n if isinstance(dict_list,dict):\n dict_list = [dict_list]\n else:\n raise TypeError(\"dict_list must be a dict or a list of dicts\")\n else:\n if not all([isinstance(x,...
[ "0.5770778", "0.5673443", "0.5617882", "0.5580341", "0.55801505", "0.5575327", "0.5571679", "0.55587304", "0.5528891", "0.5508562", "0.55064344", "0.5500273", "0.5479882", "0.5471181", "0.5468983", "0.546297", "0.54368216", "0.54329467", "0.5403189", "0.5389576", "0.5385095",...
0.0
-1
Fields that are expanded (typically tax lot id) are also in need of normalization to remove characters that prevent easy matching. This method will remove unwanted characters from the jurisdiction tax lot id. Here are some examples of what actual city taxlots can look like 13153123902 069180102923 14A612 123.4123 PANL1593005 0.000099 00012312 12123121212134567 12 0123 TT0612
def _normalize_expanded_field(value): value = value.strip() value = re.sub(r'\s{2,}', ' ', value) value = re.sub(r'/{2,}', '/', value) value = re.sub(r'\\{2,}', '\\\\', value) value = re.sub(r'-{2,}', '-', value) value = re.sub(r'\*{2,}', '*', value) value = re.sub(r'\.{2,}', '.', value) value = value.upper() return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tidy_telephone(telephone):\n junk = ['none', 'none1', 'na', 'n/a', 'same', 'yes', 'cell', 'offsite']\n telephone = telephone.replace('xxx-xxx-xxxx', '')\n telephone = telephone.replace('ext', ' x')\n telephone = telephone.replace(' cell', '')\n telephone = telephone.replace('\"', '')\n teleph...
[ "0.617545", "0.59764254", "0.5974559", "0.58157676", "0.57999325", "0.57070196", "0.56774664", "0.5623333", "0.56135535", "0.55448365", "0.5531851", "0.5499692", "0.54500884", "0.54454935", "0.5440887", "0.5432032", "0.54141355", "0.5411208", "0.54100305", "0.53966856", "0.53...
0.5550276
9
take a field from the csv and expand/split on a delimiter and return a list of individual values. If the return_list flag is set to true, then this method will return the data back as a list of new fields instead of a cleaned up string and normalized with semicolon delimiter
def expand_and_normalize_field(field, return_list=False): if isinstance(field, basestring): field = field.rstrip(';:,') data = [_normalize_expanded_field(r) for r in re.split(",|;|:", field)] if return_list: return data else: return ";".join(data) else: if return_list: return [field] else: return field
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert", "def parse_and_flatten(df, field_name):\n\n # Parse and flatten the list\n lst = list(df[field_name])\n lst = [x.split('|') for x in lst]\n\n lst_flat = []\n for s...
[ "0.63519025", "0.63037026", "0.6226924", "0.621855", "0.621855", "0.605141", "0.60084903", "0.5960452", "0.5954665", "0.58187693", "0.58148223", "0.57171553", "0.56535655", "0.5637533", "0.56276727", "0.55754817", "0.55633485", "0.55611026", "0.54911935", "0.54818034", "0.543...
0.7508108
0
Take a row and a field which may have delimited values and convert into a list of new rows with the same data expect for the replaced delimited value.
def expand_rows(row, delimited_fields, expand_row): # _log.debug('expand_row is {}'.format(expand_row)) # go through the delimited fields and clean up the rows copy_row = copy.deepcopy(row) for d in delimited_fields: if d in copy_row: copy_row[d] = expand_and_normalize_field(copy_row[d], False) if expand_row: new_values = [] for d in delimited_fields: fields = [] if d in copy_row: for value in expand_and_normalize_field(copy_row[d], True): fields.append({d: value}) new_values.append(fields) # return all combinations of the lists combinations = list(itertools.product(*new_values)) new_rows = [] for c in combinations: new_row = copy.deepcopy(copy_row) # c is a tuple because of the .product command for item in c: for k, v in item.items(): new_row[k] = v new_rows.append(new_row) return new_rows else: return [copy_row]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processRow(self, row):\n\t\tif self.delim is not None:\n\t\t\trowArr = row.split(self.delim)\n\t\t\tmsg = \"row does not have expected number of columns found \" + str(len(rowArr)) + \" expected \" + str(self.rowSize)\n\t\t\tassert len(rowArr) == self.rowSize, msg\n\t\telse:\n\t\t\trowArr = row\n\t\t\t\n\t\tne...
[ "0.6390672", "0.61461806", "0.61275697", "0.6075597", "0.6021947", "0.60119367", "0.5930786", "0.5849591", "0.5666509", "0.56609106", "0.562658", "0.56255656", "0.5619078", "0.5576439", "0.55647933", "0.55612415", "0.5549309", "0.5546282", "0.5542975", "0.5528512", "0.5520903...
0.63998896
0
Apply mapping of row data to model.
def map_row(row, mapping, model_class, extra_data_fields=[], cleaner=None, **kwargs): initial_data = kwargs.get('initial_data', None) model = model_class() # _log.debug("map_row's mappings {}".format(mapping)) # If there are any initial states we need to set prior to mapping. if initial_data: model = apply_initial_data(model, initial_data) # concat is not used as of 2016-09-14 # concat = _set_default_concat_config(concat) for raw_field, value in row.items(): is_extra_data = True if raw_field in extra_data_fields else False # Save the value if is is not None, keep empty fields. if value is not None: model = apply_column_value(raw_field, value, model, mapping, is_extra_data, cleaner) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyMapping(self):\n pass", "def map(self, function=lambda item: item):\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)", "def map_transfer_to_row(self, transfer):\n pass", "def _do_mapping(self):\n pass", ...
[ "0.66845304", "0.62534523", "0.61838084", "0.604524", "0.59644157", "0.5678246", "0.56550837", "0.5597702", "0.5595955", "0.5552257", "0.55398905", "0.54507", "0.54413307", "0.542242", "0.54172045", "0.54105145", "0.5367758", "0.5321824", "0.5305091", "0.5284699", "0.52551997...
0.77035165
0
Updates stats inside mod_stats_map with data gathered from the file.
def get_file_mod_stats_for_upstream_refs(file_name, mod_stats_map): with open(file_name) as f: lines = f.readlines() upstream_ref = None upstream_start_line = None for line_number, line in enumerate(lines): if REGION_START_TAG in line: tag, ref_name = _extract_tag_and_ref_name_from_line(line, False) if REGION_UPSTREAM_TAG in tag: upstream_ref = ref_name upstream_start_line = line_number elif REGION_END_TAG in line and upstream_ref: mod_stats = mod_stats_map[upstream_ref] mod_stats.mod_count += 1 mod_stats.line_count += line_number - upstream_start_line - 1 upstream_ref = None upstream_start_line = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_stat_file(self):\n logfile = \"../data/{}_stat.json\".format(self.ID)\n statobj = {\n 'hp': self.hp,\n 'max_hp': MAX_TANK_HP,\n 'ammo': self.ammo,\n 'score': self.score,\n 'age': self.age,\n 'alive': not self.is_dead(),\n ...
[ "0.63213795", "0.61827755", "0.57400024", "0.5537577", "0.5465735", "0.5393763", "0.5373763", "0.5370881", "0.53650606", "0.5338288", "0.5331641", "0.52704084", "0.52475107", "0.5246663", "0.51833093", "0.5156864", "0.51413554", "0.51362735", "0.5133316", "0.51267594", "0.512...
0.63252497
0
Find the tracking file for the given file. Returns the last path mentioned in the file via a tracking tag or the equivalent thirdparty path given the file's path. If there is no file in the default path and no files mentioned within the file exist, returns None. Normally the thirdparty path must exist. Passing |check_exist|=False will bypass this check when it is not desired. An additional check is enabled by passing |check_uses_tag|=True. In this case the given file MUST use either a file track tag or another modification tag, before a tracking_path is returned. stats is a variable for keeping track of the status of the analyzer, which can be None.
def compute_tracking_path(stats, our_path, our_lines, do_lint_check=False, check_exist=True, check_uses_tags=False): tracking_path = staging.get_default_tracking_path(our_path) base_matcher = re.compile(re.escape(FILE_TRACK_TAG) + r' "([^\"]+)"') tag_matcher = re.compile(re.escape(REGION_START_TAG)) uses_any_tags = False next_lineno = 1 for line in our_lines: if stats: stats['lineno'] = next_lineno match = base_matcher.search(line) if match: tracking_path = match.group(1) if not os.path.exists(tracking_path) and stats: show_error(stats, 'Mod tracking path does not exist:\n' + line) if next_lineno > MAX_ARC_TRACK_SEARCH_LINES: show_error(stats, 'Tracking not allowed on line > %d' % MAX_ARC_TRACK_SEARCH_LINES) uses_any_tags = True break elif not uses_any_tags and tag_matcher.search(line): uses_any_tags = True next_lineno += 1 if (not do_lint_check and (uses_any_tags or not check_uses_tags) and next_lineno > MAX_ARC_TRACK_SEARCH_LINES): break if not tracking_path: return None if check_uses_tags and not uses_any_tags: return None if check_exist and not os.path.exists(tracking_path): return None return tracking_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _findfile(self, path):\n\n # Build list of possible local file paths\n if not self._isurl(path):\n # Valid local paths\n filelist = self._possible_names(path)\n # Paths in self._destpath\n filelist += self._possible_names(self.abspath(path))\n el...
[ "0.54929686", "0.5405405", "0.5390188", "0.538945", "0.52806234", "0.52451473", "0.524215", "0.5137974", "0.5047577", "0.502459", "0.50199705", "0.49906838", "0.4970432", "0.49497023", "0.4910103", "0.4907254", "0.48910886", "0.48726743", "0.48697725", "0.4835479", "0.4827484...
0.718714
0
Compute the notices object as if the two paths were properly staged. analyze_diffs needs to be independent of staging. Staging might not have been run, or might be out of date from when analyze_diffs is run. So we make a best attempt to reconstruct the notices that would have occurred poststaging.
def _compute_staged_notices(mods_path, third_party_path): mods_notices = notices.Notices() if mods_path: mods_notices.add_sources([mods_path]) third_party_notices = notices.Notices() if third_party_path: third_party_notices.add_sources([third_party_path]) # If there are mods and third_party notices, pick the one that is more # specific to the file, which is the one that has a deeper path. if (_count_directory_levels_in_license_root(third_party_notices) > _count_directory_levels_in_license_root(mods_notices)): return third_party_notices else: return mods_notices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_diffs(history):\n\n # First get all possible representations\n mgr = plugins_get_mgr() \n keys = mgr.search('representation')['representation']\n representations = [mgr.get_by_key('representation', k) for k in keys]\n\n for i in range(len(history)):\n if i+1 > len(history) - 1:\n ...
[ "0.5009308", "0.50050104", "0.47927547", "0.47090858", "0.4646092", "0.46367276", "0.4600246", "0.45939845", "0.4547362", "0.45332745", "0.45131713", "0.45066488", "0.44925582", "0.44843617", "0.44840214", "0.4477567", "0.4472213", "0.44629148", "0.4452782", "0.4435036", "0.4...
0.5701247
0
Create a Vocabulary object.
def __init__(self, max_size=None, lower=True, unk_token=True, remove_stopwords=False, specials=('<pad>',)): self._max_size = max_size self._lower = lower self._unk = unk_token self.token2id = {token: i for i, token in enumerate(specials)} self._id2token = list(specials) self._token_count = Counter() self.can_remove_stopwords=remove_stopwords self.num_docs = 0 self.num_pos = 0 self.num_nnz = 0 self.corpus=None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vocab():\n symbols = DEFAULT_SPECIAL_SYMBOLS + [\"mouse\", \"dog\", \"tree\"]\n return Vocabulary(symbols)", "def from_dict(cls, dikt: dict) -> 'Vocabulary':\n return util.deserialize_model(dikt, cls)", "def create_vocab(vocab_path='ORBvoc-synth.txt'):\n total_time = 10 # seconds\n num_...
[ "0.67872804", "0.660514", "0.6490391", "0.6246055", "0.6192233", "0.61789507", "0.6158393", "0.613217", "0.61019224", "0.60960674", "0.606695", "0.6037764", "0.6025569", "0.59994584", "0.59994584", "0.5967367", "0.5922967", "0.59061986", "0.58743894", "0.58231103", "0.5815941...
0.0
-1
Add token to vocabulary.
def add_token(self, token): token = self.process_token(token) self._token_count.update([token])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_token(self,token):\n\t\tif not token:\n\t\t\tlogging.error(\"Token cannot be empty!\")\n\t\t\texit()\n\n\t\tself.tokens.append(token.lower())\n\t\t#self.user_defined_token = token.lower()", "def add_embedding(self, token, embedding):\n self.word2idx[token] = self.vocab_size\n self.vocab_siz...
[ "0.7474371", "0.7235822", "0.6903988", "0.6526929", "0.65188766", "0.6505907", "0.6505907", "0.64998305", "0.6496983", "0.6493279", "0.64262176", "0.6420701", "0.6391367", "0.6325264", "0.63200015", "0.62549394", "0.62311953", "0.622333", "0.6177798", "0.61690384", "0.6133628...
0.68879795
4
Update dictionary from a collection of documents. Each document is a list of tokens.
def add_documents(self, docs): if 'sentences' in docs: for sent in docs.sentences: sent = map(self.process_token, [t for t in sent.tokens if not t.is_stopword]) self._token_count.update(sent) else: sent = list(map(self.process_token, [t for t in docs.tokens if not t.is_stopword])) self._token_count.update(sent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_documents(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)", "def add_document_lists(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)",...
[ "0.68999934", "0.6873283", "0.6396484", "0.6270356", "0.6200283", "0.61873966", "0.61502284", "0.6042249", "0.6037067", "0.5998114", "0.59293014", "0.58950543", "0.5877849", "0.5844076", "0.58293545", "0.5767847", "0.57396615", "0.5730059", "0.5711739", "0.563978", "0.5620126...
0.67038983
2
Update dictionary from a collection of documents. Each document is a list of tokens.
def add_document_lists(self, docs): for sent in docs: sent = map(self.process_token, sent) self._token_count.update(sent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_documents(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)", "def add_documents(self, docs):\n if 'sentences' in docs:\n for sent in docs.sentences:\n sent = map(self.process_token, [t fo...
[ "0.68998206", "0.67045903", "0.63965476", "0.6270014", "0.6202376", "0.61885744", "0.6151995", "0.60434496", "0.60377157", "0.59981596", "0.59295934", "0.58968425", "0.5878697", "0.5846581", "0.5829846", "0.57702076", "0.57415456", "0.5731184", "0.571159", "0.56422395", "0.56...
0.6872754
1
Get the list of token_id given doc.
def doc2id(self, doc): if isinstance(doc, string_types): raise TypeError("doc2idx expects an array of unicode tokens on input, not a single string") doc = map(self.process_token, doc) return [self.token_to_id(token) for token in doc]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def doc2id(self, doc):\n doc = map(self.process_token, doc)\n return [self.token_to_id(token) for token in doc]", "def get_tokens(self, document):\n raise NotImplementedError()", "def doc2token(self, doc):\n return [self.word2idx[word] if self.word2idx.__contains__(word)\n ...
[ "0.71084183", "0.6638949", "0.64065033", "0.6308615", "0.6308615", "0.6248695", "0.6248695", "0.616698", "0.61223453", "0.60448986", "0.6016193", "0.6015829", "0.6009144", "0.59384847", "0.5869318", "0.5861415", "0.5859866", "0.5852833", "0.58410364", "0.5834465", "0.5816366"...
0.6642477
1
Get the token list.
def id2doc(self, ids): return [self.id_to_token(idx) for idx in ids]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tokens(self) -> List[str]:\n return self.tokens", "def tokens(self):\n # type: () -> List[Token]\n return self._tokens", "def tokens(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenList)['tokens']", "def get_tokens(self):\r\n return self.token_set", "def get_token_lis...
[ "0.83647424", "0.80425704", "0.79461306", "0.79414326", "0.78283435", "0.75734204", "0.75067395", "0.7500921", "0.7500921", "0.7500921", "0.74711776", "0.74703306", "0.73524517", "0.72486407", "0.7219096", "0.7147502", "0.7132472", "0.7059246", "0.6929604", "0.69008", "0.6877...
0.0
-1
Convert `document` into the bagofwords (BoW) format = list of `(token_id, token_count)` tuples.
def doc2bow(self, document, allow_update=False, return_missing=False): doc=[t.text for t in document.tokens] if isinstance(doc, string_types): raise TypeError("doc2bow expects an array of unicode tokens on input, not a single string") # Construct (word, frequency) mapping. counter = defaultdict(int) for w in doc: counter[w if isinstance(w, str) else str(w, 'utf-8')] += 1 token2id = self.token2id if allow_update or return_missing: missing = sorted(x for x in iteritems(counter) if x[0] not in token2id) if allow_update: for w, _ in missing: # new id = number of ids made so far; # NOTE this assumes there are no gaps in the id sequence! token2id[w] = len(token2id) result = {token2id[w]: freq for w, freq in iteritems(counter) if w in token2id} if allow_update: self.num_docs += 1 self.num_pos += sum(itervalues(counter)) self.num_nnz += len(result) # keep track of document and collection frequencies for tokenid, freq in iteritems(result): self.cfs[tokenid] = self.cfs.get(tokenid, 0) + freq self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1 # return tokenids, in ascending id order result = sorted(iteritems(result)) if return_missing: return result, dict(missing) else: return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_bag_words(document_tokenized):\n bag_words = dict()\n for token in document_tokenized:\n if token in bag_words.keys():\n bag_words[token] += 1\n else:\n bag_words[token] = 1\n return bag_words", "def sentencesToBow(documents):\n bowSentences = []\n dict...
[ "0.75598925", "0.67602545", "0.64582515", "0.63520515", "0.5917524", "0.5897528", "0.58469146", "0.5783915", "0.5782803", "0.5771413", "0.57590044", "0.5732742", "0.5720406", "0.57064784", "0.5675183", "0.5654374", "0.5644353", "0.5635149", "0.55853426", "0.5584801", "0.55662...
0.6443913
3
Get the token_id of given token.
def token_to_id(self, token): token = self.process_token(token) return self.token2id.get(token, len(self.token2id) - 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def token_to_id(self, token):\n token = self.process_token(token)\n return self._token2id.get(token, len(self._token2id) - 1)", "def token_to_id(self, token):\r\n return self.encoder.get(token, self.encoder.get(self.unk_token))", "def token_id(self) -> Optional[pulumi.Input[int]]:\n ...
[ "0.8310672", "0.7980398", "0.79235566", "0.7840283", "0.7578476", "0.730748", "0.71591955", "0.7151534", "0.7099397", "0.6803577", "0.66393685", "0.65554255", "0.6520694", "0.64726806", "0.64626735", "0.6437027", "0.6431865", "0.6408412", "0.63829017", "0.6313626", "0.6299330...
0.8298625
1
tokenid to token (string).
def id_to_token(self, idx): return self._id2token[idx]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map_id_to_token(self, id: int):\n return self._id_to_token[id]", "def token_to_id(self, token):\r\n return self.encoder.get(token, self.encoder.get(self.unk_token))", "def token_to_id(self, token):\n token = self.process_token(token)\n return self.token2id.get(token, len(self.to...
[ "0.7235237", "0.69436103", "0.6893987", "0.6749872", "0.67123127", "0.6693682", "0.6675571", "0.6672998", "0.6606152", "0.66021", "0.65837336", "0.6532288", "0.6484542", "0.64636326", "0.6327243", "0.62979484", "0.6255819", "0.6240919", "0.6239636", "0.6181812", "0.6127101", ...
0.7267592
1
Return the vocabulary as a reversed dict object.
def reverse_vocab(self): return self._id2token
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reverse_vocab(vocab: Dict[str, int]) -> Dict[int, str]:\n return {v: k for k, v in vocab.items()}", "def reverse_dicts(self):\n\t\tself.rev_worddict = {self.worddict[word]: word for word in self.worddict}\n\t\tself.rev_classdict = {self.classdict[cl]: cl for cl in self.classdict}", "def __reversed__(sel...
[ "0.77923155", "0.6690991", "0.6248791", "0.6209789", "0.6155369", "0.6095056", "0.60796165", "0.60738266", "0.606323", "0.60227686", "0.6020329", "0.5977967", "0.5962219", "0.5951278", "0.5935339", "0.591856", "0.591432", "0.5899537", "0.58708775", "0.58708775", "0.5850216", ...
0.71455634
2
Update the current trigger. The GTM API does not support a partial update. Therfore, this method will send all fields expliztily set in the method arguments and those cached in the instance properties.
def update(self, refresh=False, parameter=None, **kwargs): if refresh: self.__init__(path=self._path, service=self.service) default_asset = { "maxTimerLengthSeconds": self._maxTimerLengthSeconds, "totalTimeMinMilliseconds": self._totalTimeMinMilliseconds, "uniqueTriggerId": self._uniqueTriggerId, "verticalScrollPercentageList": self._verticalScrollPercentageList, "horizontalScrollPercentageList": self._horizontalScrollPercentageList, "containerId": self._containerId, "waitForTagsTimeout": self._waitForTagsTimeout, "accountId": self._accountId, "waitForTags": self._waitForTags, "intervalSeconds": self._intervalSeconds, "eventName": self._eventName, "visibilitySelector": self._visibilitySelector, "workspaceId": self._workspaceId, "customEventFilter": self._customEventFilter, "parentFolderId": self._parentFolderId, "continuousTimeMinMilliseconds": self._continuousTimeMinMilliseconds, "selector": self._selector, "triggerId": self._triggerId, "tagManagerUrl": self._tagManagerUrl, "fingerprint": self._fingerprint, "visiblePercentageMax": self._visiblePercentageMax, "path": self._path, "name": self._name, "visiblePercentageMin": self._visiblePercentageMin, "type": self._type, "notes": self._notes, "interval": self._interval, "filter": self._filter, "autoEventFilter": self._autoEventFilter, "limit": self._limit, "checkValidation": self._checkValidation, } update_asset = {**default_asset, **kwargs} if parameter: parameter_dict = {**param_dict(self._parameter), **param_dict(parameter)} parameter = list(parameter_dict.values()) else: parameter = self._parameter update_asset["parameter"] = [x.to_obj() for x in parameter] update_asset = {k: v for k, v in update_asset.items() if v is not None} request = self.triggers_service.update(path=self.path, body=update_asset) response = request.execute() self.__init__(trigger=response, service=self.service)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, *args, **kwargs):\n # callable, but does nothing by default", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs): # real signature unknown\n ...
[ "0.5962123", "0.5947622", "0.5947622", "0.5947622", "0.59135634", "0.59135634", "0.59135634", "0.59135634", "0.59135634", "0.59135634", "0.5901567", "0.5882499", "0.5750089", "0.56001323", "0.55886126", "0.5582611", "0.5573218", "0.5543781", "0.5498977", "0.54955", "0.5467472...
0.56299794
13
Delete the current trigger.
def delete(self): request = self.triggers_service.delete(path=self._path) request.execute()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _delTrigger(self, message: IRCMessage) -> IRCResponse:\n triggerName = message.parameterList[1]\n if triggerName in self.storage:\n del self.storage[triggerName]\n return IRCResponse(f\"Trigger {triggerName} deleted!\", message.replyTo)\n else:\n return IRC...
[ "0.7167729", "0.70552254", "0.6779274", "0.6583904", "0.655758", "0.64790803", "0.6461914", "0.6461914", "0.6461914", "0.6461914", "0.645271", "0.64262265", "0.62824124", "0.6266517", "0.62515134", "0.62503153", "0.62478745", "0.62462974", "0.6191452", "0.61563873", "0.615638...
0.8082834
0
Create and return a D0 > hh' Selection object.
def makeD2hhAsymm(name, config, KPIDK_string, PiPIDK_string, Mass_low_string, Mass_high_string, CombPIDK_string, DecayDescriptor, inputSel, useTOS, Hlt1TOS, Hlt2TOS ) : def makeTISTOS( name, _input, _hlttos ) : from Configurables import TisTosParticleTagger _tisTosFilter = TisTosParticleTagger( name + "Tagger" ) _tisTosFilter.TisTosSpecs = _hlttos return Selection( name , Algorithm = _tisTosFilter , RequiredSelections = [ _input ] ) _Kcuts1 = "~ISMUON & (PT > %(DaugPtMin)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2)s)" % locals()['config'] _KcutsPIDK = KPIDK_string % locals()['config'] _Kcuts2 = " & (ISLONG) & (P > %(DaugP)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2)s)" % locals()['config'] _Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2 _Picuts1 = "~ISMUON & (PT > %(DaugPtMin)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2)s)" % locals()['config'] _PicutsPIDK = PiPIDK_string % locals()['config'] _Picuts2 = " & (ISLONG) & (P > %(DaugP)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2)s)" % locals()['config'] _Picuts = _Picuts1 + _PicutsPIDK + _Picuts2 _dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts } _massLow = Mass_low_string % locals()['config'] _massHigh = Mass_high_string % locals()['config'] _combCuts1 = "(APT > %(D0Pt)s* MeV)" \ "& (AHASCHILD( PT > %(DaugPtMax)s* MeV ) )" \ "& (ADOCA(1,2)< %(D0DOCA)s* mm)" \ "& (AP > %(D0P)s* MeV)" % locals()['config'] _combCutsPIDK = CombPIDK_string % locals()['config'] _combCuts = _combCuts1 + _combCutsPIDK + _massLow + _massHigh _motherCuts = "(VFASPF(VCHI2PDOF) < %(D0VtxChi2Ndof)s)" \ "& (BPVVDCHI2 > %(D0FDChi2)s)" \ "& (BPVLTIME() > %(D0Tau)s)" \ "& (BPVDIRA > %(D0BPVDira)s)" % locals()['config'] _D0 = CombineParticles( DecayDescriptor = DecayDescriptor, MotherCut = _motherCuts, CombinationCut = _combCuts, DaughtersCuts = _dauCuts) _sel = Selection ( name+'Sel', Algorithm = _D0, RequiredSelections = inputSel ) if not useTOS: return _sel _selD2hhHlt1TOS = makeTISTOS( name + "D2hhHlt1TOS" , _sel , Hlt1TOS ) _selD2hhHlt2TOS = makeTISTOS( name + "D2hhHlt2TOS" , 
_selD2hhHlt1TOS , Hlt2TOS ) return _selD2hhHlt2TOS
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_selection(cls):\n guid = compas_rhino.select_point()\n return cls.from_guid(guid)", "def get_slider():\n return dcc.RangeSlider(\n id='hours',\n value=[0, 23],\n min=0,\n max=23,\n marks={i: str(i) for i in range(0, 24, 3)}\n )", "def from_selecti...
[ "0.5978266", "0.5615562", "0.53860027", "0.53363514", "0.52604216", "0.5230591", "0.5123659", "0.5109732", "0.5086925", "0.50513536", "0.50479877", "0.5033334", "0.49208", "0.49170148", "0.4872133", "0.4828051", "0.48176673", "0.47932035", "0.4729161", "0.4611828", "0.4548788...
0.40723988
77
Create and return a D > D0 pi Selection object.
def makeDstar2D0Pi( name , config , DecayDescriptor , inputSel ) : daugCuts = "(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)" % locals()['config'] combCuts = "((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)" % locals()['config'] dstarCuts = "(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)" \ "& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)" % locals()['config'] _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor , DaughtersCuts = { "pi+" : daugCuts } , CombinationCut = combCuts , MotherCut = dstarCuts ) return Selection( name+'Sel', Algorithm = _Dstar, RequiredSelections = inputSel )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , se...
[ "0.60390633", "0.5895539", "0.55829453", "0.5348063", "0.5305103", "0.5261155", "0.5167734", "0.51543236", "0.5077886", "0.5058524", "0.50535107", "0.50340015", "0.5013713", "0.49337313", "0.49296355", "0.48771095", "0.48653218", "0.48559302", "0.48356175", "0.48229763", "0.4...
0.6516377
0
Create and return a D > K pi pi+ Selection object.
def makeDPartial( name , config , DecayDescriptor , inputSel ) : _Kcuts1 = "~ISMUON & (PT > %(DaugPtLoose)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2Loose)s)" % locals()['config'] _KcutsPIDK = " & (PIDK > %(HighPIDK)s)" % locals()['config'] _Kcuts2 = " & (ISLONG) & (P > %(DaugPLoose)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2Loose)s)" % locals()['config'] _Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2 _Picuts1 = "~ISMUON & (PT > %(DaugPtMin)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2)s)" % locals()['config'] _PicutsPIDK = " & (PIDK < %(LowPIDK)s)" % locals()['config'] _Picuts2 = " & (ISLONG) & (P > %(DaugP)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2)s)" % locals()['config'] _Picuts = _Picuts1 + _PicutsPIDK + _Picuts2 _dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts } #_Kcuts1 = "~ISMUON & (PT > 500* MeV) & (MIPCHI2DV(PRIMARY) > 4)" #_KcutsPIDK = " & (PIDK > 5)" #_Kcuts2 = " & (ISLONG) & (P > 5000* MeV) & (TRCHI2DOF < 5)" #_Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2 #_Picuts1 = "~ISMUON & (PT > 500* MeV) & (MIPCHI2DV(PRIMARY) > 4)" #_PicutsPIDK = " & (PIDK < 0)" #_Picuts2 = " & (ISLONG) & (P > 5000* MeV) & (TRCHI2DOF < 5)" #_Picuts = _Picuts1 + _PicutsPIDK + _Picuts2 #_dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts } _combCuts = "(APT > %(D0PtLoose)s* MeV)" \ "& (AP > %(D0P)s* MeV)" % locals()['config'] _motherCuts = "(VFASPF(VCHI2PDOF) < %(D0VtxChi2Ndof)s)" \ "& (BPVVDCHI2 > %(D0FDChi2)s)" % locals()['config'] _Dminus = CombineParticles( DecayDescriptor = DecayDescriptor , DaughtersCuts = _dauCuts , CombinationCut = _combCuts , MotherCut = _motherCuts ) return Selection( name+'Sel', Algorithm = _Dminus, RequiredSelections = inputSel )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , se...
[ "0.656367", "0.60201555", "0.57468003", "0.55250317", "0.55029947", "0.54665506", "0.53652567", "0.5354767", "0.5222274", "0.52220505", "0.51531583", "0.51516193", "0.5116602", "0.50792444", "0.5024644", "0.5015439", "0.5015241", "0.49948704", "0.49836156", "0.49750015", "0.4...
0.0
-1
Create and return a D+ > D pi+ Selection object.
def makeDstarPartial( name , config , DecayDescriptor , inputSel ) : daugCuts = "(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)" % locals()['config'] combCuts = "((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)" % locals()['config'] dstarCuts = "(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)" \ "& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)" % locals()['config'] _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor , DaughtersCuts = { "pi+" : daugCuts } , CombinationCut = combCuts , MotherCut = dstarCuts ) return Selection( name+'Sel', Algorithm = _Dstar, RequiredSelections = inputSel )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , se...
[ "0.63153064", "0.5997607", "0.5657909", "0.5577311", "0.55091554", "0.5479855", "0.52665573", "0.5263441", "0.51841724", "0.5104352", "0.50552374", "0.49336368", "0.49273384", "0.49023122", "0.4876281", "0.48567244", "0.4838079", "0.48378935", "0.47973046", "0.47966853", "0.4...
0.5291082
6
Create and return a D > D0 pi Selection object.
def makePseudoPsi( name , config , DecayDescriptor , inputSel ) : _daugCuts = "(PT> %(D0PtLoose)s*MeV)" % locals()['config'] _combCuts = "(APT> %(D0PtLoose)s*MeV)" % locals()['config'] _Psi = CombineParticles( DecayDescriptor = DecayDescriptor , DaughtersCuts = { "D0": _daugCuts } , CombinationCut = _combCuts , MotherCut = "(VFASPF(VCHI2PDOF) < 10000)" ) return Selection( name+'Sel', Algorithm = _Psi, RequiredSelections = inputSel )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeDstar2D0Pi( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n daugCuts = \"(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)\" % locals()['config']\n combCuts = \"((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)\" % locals()['config']\n ...
[ "0.65183586", "0.6036616", "0.58945334", "0.55806303", "0.5347493", "0.53058124", "0.5261733", "0.51529664", "0.50806415", "0.5059661", "0.50538725", "0.50354403", "0.5012788", "0.4933138", "0.49302754", "0.48780695", "0.48642525", "0.4854433", "0.4833495", "0.48240137", "0.4...
0.5167268
7
dataloader for training dataset via voxsampler.
def load_train(trainlst, traindir, maptrain5994, L=L, batch_size=batch_size, num_worker=num_worker, max_utt_per_spk=max_utt_per_spk, load_wav=None): if load_wav is None: def load_train_wav(path): return loadWAV(path, L=L, evalmode=False) else: load_train_wav = load_wav df_train = pd.read_csv(trainlst, sep=" ", header=None, names=["speaker", "file"]) df_train["file"] = df_train["file"].apply(lambda x: traindir + x) map_train = dict(pd.read_csv(maptrain5994, header=None).values) data = voxceleb2(df_train.values, map_train, load_train_wav) sampler = voxsampler(df_train, map_train, max_utt_per_spk=max_utt_per_spk, batch_size=batch_size) dataloader = DataLoader(data, batch_size=batch_size, num_workers=num_worker, shuffle=False, sampler=sampler) return dataloader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, de...
[ "0.7731121", "0.7497621", "0.72368264", "0.71800596", "0.7143441", "0.7102657", "0.7093028", "0.7014497", "0.6950995", "0.6860976", "0.6830999", "0.67986304", "0.6784964", "0.6782085", "0.67802626", "0.6743672", "0.6729586", "0.6659931", "0.66424596", "0.6627726", "0.6623991"...
0.0
-1
Computes the precision for the specified values of k
def accuracy(output, target, topk=(1,)): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def precision(gt, pred, k):\n k = min(len(pred), k)\n den = min(len(gt), k)\n return sum([int(pred[i] in gt) for i in range(k)]) / den", "def precision_at_k(r, k):\n assert k >= 1\n r = np.asarray(r)[:k] != 0\n if r.size != k:\n raise ValueError('Relevance score length < k')\...
[ "0.76427615", "0.74189496", "0.7418079", "0.72710466", "0.72456723", "0.72446615", "0.7237539", "0.72213703", "0.7210107", "0.7058935", "0.6852219", "0.6786084", "0.67836976", "0.6768417", "0.67273885", "0.67181736", "0.67181736", "0.67181736", "0.67181736", "0.67181736", "0....
0.0
-1
Calculate Equal Error Rate (EER).
def calculate_eer(y, y_score, pos=1): fpr, tpr, thresholds = roc_curve(y, y_score, pos_label=pos) eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.) thresh = interp1d(fpr, thresholds)(eer) return eer, np.float(thresh)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_hr_ee(self):\n\n # HR - resting HR = net HR\n net_hr = np.array([i - self.rest_hr if i is not None else None for i in self.df_epoch[\"HR\"]])\n\n # Sets values below 0% HRR (below resting HR) to 0\n net_hr[net_hr <= 0] = 0\n\n # Equation from Brage et al., 2004. Act...
[ "0.6688616", "0.6576088", "0.6453312", "0.6436712", "0.6268617", "0.61915094", "0.60727984", "0.60624784", "0.6055529", "0.5957637", "0.5948924", "0.5935441", "0.59333384", "0.5899245", "0.5894991", "0.5884183", "0.5884183", "0.5883016", "0.5870002", "0.5867805", "0.58296996"...
0.55544835
67