query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Map MXNet's log_softmax operator attributes to onnx's LogSoftMax operator and return the created node.
def convert_logsoftmax(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) # Converting to int axis = int(attrs.get("axis", -1)) temp = attrs.get("temperature", 'None') if temp != 'None': raise AttributeError("LogSoftMax: ONNX supports only temperature=None") node = onnx.helper.make_node( 'LogSoftmax', input_nodes, [name], axis=axis, name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def convert_logsoftmax(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis...
[ "0.72433263", "0.6945794", "0.6428142", "0.63807946", "0.6325919", "0.62042975", "0.6159842", "0.60255045", "0.5992672", "0.58925354", "0.5820617", "0.5788801", "0.5781208", "0.55802464", "0.55784833", "0.5528118", "0.55134857", "0.55087835", "0.5452463", "0.5428512", "0.5405...
0.75760037
0
Map MXNet's norm operator attributes to onnx's ReduceL1 and ReduceL2 operators and return the created node.
def convert_norm(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) mx_axis = attrs.get("axis", None) axes = convert_string_to_list(str(mx_axis)) if mx_axis else None keepdims = get_boolean_attribute_value(attrs, "keepdims") ord = int(attrs.get("ord", 2)) onnx_op_name = "ReduceL1" if ord == 1 else "ReduceL2" if axes: reduce_node = onnx.helper.make_node( onnx_op_name, input_nodes, [name], axes=axes, keepdims=keepdims, name=name ) return [reduce_node] else: reduce_node = onnx.helper.make_node( onnx_op_name, input_nodes, [name], keepdims=keepdims, name=name ) return [reduce_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n axis_l = [axis]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.l2_normalize(x, epsilon, axis_l)\n g.add_node(op.output(\"Out\")[0], out)", "def convert_l2normalization(node, **kwargs):\n name...
[ "0.5756237", "0.56087494", "0.5600301", "0.55728614", "0.55290496", "0.54875606", "0.5486537", "0.5459905", "0.53227746", "0.52392274", "0.5229864", "0.5215761", "0.52136004", "0.5174905", "0.5169228", "0.5152919", "0.5142199", "0.51399326", "0.51165926", "0.5102471", "0.5090...
0.7403831
0
Map MXNet's multinomial operator attributes to onnx's Multinomial operator and return the created node.
def convert_multinomial(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get("dtype", 'int32'))] sample_size = convert_string_to_list(attrs.get("shape", '1')) if len(sample_size) < 2: sample_size = sample_size[-1] else: raise AttributeError("ONNX currently supports integer sample_size only") node = onnx.helper.make_node( "Multinomial", input_nodes, [name], dtype=dtype, sample_size=sample_size, name=name, ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)", "def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper...
[ "0.58884233", "0.5768025", "0.54848015", "0.5398994", "0.5315125", "0.52907187", "0.5282063", "0.5165217", "0.51454043", "0.51417667", "0.5104819", "0.50990736", "0.50983584", "0.5056548", "0.5052555", "0.5041364", "0.50398886", "0.5031336", "0.5016045", "0.5006198", "0.50032...
0.71358234
0
Map MXNet's random_uniform operator attributes to onnx's RandomUniform operator and return the created node.
def convert_random_uniform(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) # Converting to float32 low = float(attrs.get("low", 0)) high = float(attrs.get("high", 1.0)) shape = convert_string_to_list(attrs.get('shape', '[]')) dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))] node = onnx.helper.make_node( 'RandomUniform', input_nodes, [name], low=low, high=high, dtype=dtype, shape=shape, name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen(self, op, *args, **kwargs):\r\n random_state_variable = raw_random.random_state_type()\r\n new_r, out = op(random_state_variable, *args, **kwargs)\r\n out.rng = random_state_variable\r\n self.random_state_variables.append((random_state_variable, new_r))\r\n return out", ...
[ "0.60839343", "0.5966107", "0.5965341", "0.59418684", "0.5821455", "0.55140215", "0.5499431", "0.5494168", "0.5481163", "0.5478966", "0.5458399", "0.5442405", "0.54335564", "0.5417929", "0.53986716", "0.539796", "0.539796", "0.5386957", "0.53401625", "0.5309333", "0.5306019",...
0.7275685
0
Map MXNet's random_normal operator attributes to onnx's RandomNormal operator and return the created node.
def convert_random_normal(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) # Converting to float32 mean = float(attrs.get("loc", 0)) scale = float(attrs.get("scale", 1.0)) shape = convert_string_to_list(attrs.get('shape', '[]')) dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))] node = onnx.helper.make_node( 'RandomNormal', input_nodes, [name], mean=mean, scale=scale, dtype=dtype, shape=shape, name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_random_uniform(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Converting to float32\n low = float(attrs.get(\"low\", 0))\n high = float(attrs.get(\"high\", 1.0))\n shape = convert_string_to_list(attrs.get('shape', '[]'))\n dtype = onnx.mapping.NP_TYPE_TO_...
[ "0.62901884", "0.60207254", "0.5817178", "0.57946956", "0.5718311", "0.5678145", "0.56305474", "0.5630399", "0.5541356", "0.55390555", "0.5524819", "0.55016184", "0.5497462", "0.54788977", "0.54603964", "0.54529095", "0.5433405", "0.5368286", "0.5364437", "0.534533", "0.53327...
0.7576377
0
Map MXNet's ROIPooling operator attributes to onnx's MaxRoiPool operator and return the created node.
def convert_roipooling(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) pooled_shape = convert_string_to_list(attrs.get('pooled_size')) scale = float(attrs.get("spatial_scale")) node = onnx.helper.make_node( 'MaxRoiPool', input_nodes, [name], pooled_shape=pooled_shape, spatial_scale=scale, name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_pooling(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n kernel = eval(attrs[\"kernel\"])\n pool_type = attrs[\"pool_type\"] if attrs.get(\"pool_type\") else \"max\"\n stride = eval(attrs[\"stride\"]) if attrs.get(\"stride\") else (1, 1)\n global_pool = get_bool...
[ "0.641093", "0.58088446", "0.56600386", "0.5579279", "0.55671954", "0.55426383", "0.5540293", "0.54378605", "0.5431979", "0.54058385", "0.5381407", "0.5371967", "0.5365166", "0.53410465", "0.5257729", "0.52336967", "0.5228986", "0.5204838", "0.51902246", "0.5179581", "0.51613...
0.7807513
0
Map MXNet's Tile operator attributes to onnx's Tile operator and return the created node.
def convert_tile(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) reps_list = convert_string_to_list(attrs["reps"]) initializer = kwargs["initializer"] reps_shape_np = np.array(reps_list, dtype='int64') data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[reps_shape_np.dtype] dims = np.shape(reps_shape_np) output_shape_name = "reps_attr_tensor" + str(kwargs["idx"]) tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims) initializer.append( onnx.helper.make_tensor( name=output_shape_name, data_type=data_type, dims=dims, vals=reps_list, raw=False, ) ) input_nodes.append(output_shape_name) tile_node = onnx.helper.make_node( "Tile", input_nodes, [name], name=name ) return [tensor_node, tile_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_tile(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.input.append(op.name + \":repeats\")\n return node", "def _create_tile(cls, onnx_node, inputs, opset_version):\n # we move several inputs to singa's attribuates\n # and mark them ...
[ "0.6590867", "0.5910745", "0.58112407", "0.5749959", "0.5646954", "0.5629699", "0.5605283", "0.5601611", "0.55009604", "0.54751515", "0.5461425", "0.5446524", "0.5431833", "0.542485", "0.5410572", "0.53914165", "0.53725374", "0.5355028", "0.53471506", "0.5346676", "0.53449744...
0.67022306
0
Map MXNet's broadcast_to operator attributes to onnx's Expand operator and return the created node.
def convert_broadcast_to(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) shape_list = convert_string_to_list(attrs["shape"]) initializer = kwargs["initializer"] output_shape_np = np.array(shape_list, dtype='int64') data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype] dims = np.shape(output_shape_np) output_shape_name = "expand_attr_tensor" + str(kwargs["idx"]) tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims) initializer.append( onnx.helper.make_tensor( name=output_shape_name, data_type=data_type, dims=dims, vals=shape_list, raw=False, ) ) input_nodes.append(output_shape_name) expand_node = onnx.helper.make_node( "Expand", input_nodes, [name], name=name ) return [tensor_node, expand_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_expand_as(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n target_shape = op.attr(\"target_shape\")\n out = _op.broadcast_to(x, target_shape)\n g.add_node(op.output(\"Out\")[0], out)", "def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwarg...
[ "0.6505846", "0.604694", "0.60059506", "0.59943974", "0.5957492", "0.5825268", "0.5748968", "0.57150435", "0.5620855", "0.5597498", "0.55732626", "0.5556347", "0.5554119", "0.5502286", "0.54921335", "0.5468958", "0.5464709", "0.5461847", "0.5448856", "0.54340446", "0.53191674...
0.74339557
0
Map MXNet's topk operator attributes to onnx's TopK operator and return the created node.
def convert_topk(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) axis = int(attrs.get('axis', '-1')) k = int(attrs.get('k', '1')) ret_type = attrs.get('ret_typ') dtype = attrs.get('dtype') outputs = [name + '_output0'] if ret_type and ret_type == 'both': if dtype and dtype == 'int64': outputs.append(name + '_output1') else: raise NotImplementedError("ONNX expects indices to be of type int64") else: raise NotImplementedError("ONNX expects both value and indices as output") export_nodes = [] k = np.asarray([k], dtype=np.int) k_node = create_helper_tensor_node(k, name + '__k', kwargs) export_nodes.extend(k_node) k_node = k_node[-1].name input_node = input_nodes[0] topk_node = onnx.helper.make_node( "TopK", [input_node, k_node], outputs, axis=axis, name=name ) export_nodes.extend([topk_node]) return [topk_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_topk(g, op, block):\n\n data = g.get_node(op.input(\"X\")[0])\n if op.input(\"K\"):\n k = g.get_node(op.input(\"K\")[0])\n else:\n k = op.attr(\"k\")\n\n largest = True\n axis = -1\n if op.has_attr(\"axis\"):\n axis = op.attr(\"axis\")\n if op.has_attr(\"larges...
[ "0.6419405", "0.6140541", "0.5742067", "0.54392713", "0.5406331", "0.5386023", "0.5383082", "0.5365803", "0.5237811", "0.5237811", "0.51466364", "0.51442844", "0.5112016", "0.509088", "0.5069989", "0.50558615", "0.50066954", "0.49857953", "0.49676558", "0.49675217", "0.496543...
0.68704766
0
Map MXNet's Take operator attributes to onnx's Gather operator.
def convert_take(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) axis = int(attrs.get('axis', 0)) node = onnx.helper.make_node( "Gather", input_nodes, [name], axis=axis, name=name, ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_gather(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n index = g.get_node(op.input(\"Index\")[0])\n axis = op.attr(\"axis\")\n out = _op.take(x, index, axis)\n g.add_node(op.output(\"Out\")[0], out)", "def _create_gather(cls, onnx_node, inputs, opset_version):\n axis = o...
[ "0.5603602", "0.5088366", "0.4996854", "0.49716508", "0.49634594", "0.4908585", "0.48814094", "0.4826999", "0.46374422", "0.46165276", "0.46148214", "0.46006873", "0.4598241", "0.45922422", "0.45850176", "0.4572346", "0.45681974", "0.45681974", "0.45681974", "0.45255744", "0....
0.6138117
0
Returns list of detected objects.
def get_output(interpreter, score_threshold, top_k, image_scale=1.0): boxes = common.output_tensor(interpreter, 0) class_ids = common.output_tensor(interpreter, 1) scores = common.output_tensor(interpreter, 2) count = int(common.output_tensor(interpreter, 3)) def make(i): ymin, xmin, ymax, xmax = boxes[i] return Object( id=int(class_ids[i]), score=scores[i], bbox=BBox(xmin=np.maximum(0.0, xmin), ymin=np.maximum(0.0, ymin), xmax=np.minimum(1.0, xmax), ymax=np.minimum(1.0, ymax))) return [make(i) for i in range(top_k) if scores[i] >= score_threshold]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_object_detections(self):\n detections = self.__get_cropped_detections(self.image)\n return detections", "def detect_objects(snap):\n client = vision.ImageAnnotatorClient()\n print(snap)\n\n with open(snap, 'rb') as im_file:\n content = im_file.read()\n image = vision.Imag...
[ "0.7872937", "0.7200342", "0.7165433", "0.7144725", "0.7090058", "0.695256", "0.69507074", "0.6947512", "0.69345987", "0.6881886", "0.6862112", "0.68335724", "0.6699529", "0.6509898", "0.6494214", "0.6395166", "0.6335874", "0.6323961", "0.630811", "0.6308098", "0.6254732", ...
0.0
-1
raise from_none(ValueError('a')) == raise ValueError('a') from None
def from_none(exc): exc.__cause__ = None exc.__suppress_context__ = True return exc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fail(raise_):\n if raise_:\n raise _UnexpectedForm()\n return None", "def test_invalid_argument_type(self):\n t = TruthTable('A or B')\n\n with self.assertRaises(InvalidArgumentTypeError):\n t.equivalent_to(float())\n\n with self.assertRaises(InvalidArgumentTypeE...
[ "0.65491164", "0.6507355", "0.63295954", "0.61693627", "0.6143416", "0.60782343", "0.59815896", "0.5925763", "0.5909486", "0.5908904", "0.5904242", "0.58995235", "0.5848055", "0.5834902", "0.5818783", "0.57958984", "0.5760411", "0.5742847", "0.57380986", "0.5723444", "0.57004...
0.68841505
0
Try and get the option out of os.enviorn and cast it, otherwise return the default (casted)
def getenv(self, section, option, key=None, type=str, context=None): if key is None: key = option value = os.environ.get(key, None) if value is not None: try: return type(value) except TypeError: pass value = self.get(section, option, context=context) if value: return type(value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getenv(option, default=undefined, cast=undefined):\n\n # We can't avoid __contains__ because value may be empty.\n if option in os.environ:\n value = os.environ[option]\n else:\n if isinstance(default, Undefined):\n raise UndefinedValueError('{} not found. Declare it as envvar...
[ "0.72295177", "0.70083475", "0.6605337", "0.6365441", "0.63379735", "0.63131976", "0.6185211", "0.6164362", "0.6162968", "0.6154077", "0.6150953", "0.6140299", "0.6121057", "0.6113948", "0.60905766", "0.6068026", "0.6061009", "0.6054365", "0.6050812", "0.60491484", "0.6044577...
0.69680715
2
Calculates the average price we would pay / receive per unit of `symbol` if we wanted to trade `quantity` of that `symbol`, based on its order book
def getOrderBookPrice(exchange, symbol, side, quantity, order_book=None): # TODO test it # print("obap1") order_book_side = order_book['asks'] \ if side == exchange.SIDE_SELL else order_book['bids'] quantity = Decimal(quantity) i, orders, price = 0, [], Decimal(0) accounted_for_quantity = Decimal(0) qtdif = Decimal(1) # print("obap2") while accounted_for_quantity < quantity or qtdif > Decimal(0.0001): try: order = order_book_side[i] except IndexError: raise Exception("There are not enough orders in the Order Book.") # return False qty = min(Decimal(order[1]), quantity - accounted_for_quantity) price += Decimal(order[0]) * qty accounted_for_quantity += qty qtdif = abs(Decimal(1) - accounted_for_quantity / quantity) i += 1 # print("obap3") return price / quantity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cost(self, symbol) -> float:\n if len(symbol) <= 6:\n search = self.trader.stock_positions + self.trader.crypto_positions\n for p in search:\n if p['symbol'] == symbol:\n return p['avg_price']\n return None\n else:\n ...
[ "0.64651424", "0.6410142", "0.63740027", "0.63389856", "0.6299657", "0.6286352", "0.6263074", "0.6189815", "0.6114266", "0.6032858", "0.60058963", "0.5941486", "0.5917772", "0.5853512", "0.5828471", "0.5826779", "0.580022", "0.57563347", "0.57001877", "0.5657362", "0.5588201"...
0.7126667
0
Inserts multiple new asks in the order book (assumes that the order book AND the new_asks list are sorted)
def insertAsks(previous_asks, received_asks): new_asks = [] if len(received_asks) < 1: return previous_asks if len(previous_asks) < 1: return received_asks # print("Prev") # pprint(previous_asks) # print("Recv") # pprint(received_asks) # Uses the merge-sort idea of popping the first element in the lists # (which should also be the lowest) while len(previous_asks) > 0 and len(received_asks) > 0: ask = None if Decimal(previous_asks[0][0]) < Decimal(received_asks[0][0]): ask = previous_asks.pop(0) # print('popped from prev') elif Decimal(previous_asks[0][0]) > Decimal(received_asks[0][0]): # print('popped from recv') ask = received_asks.pop(0) else: # print('equal, popped from both') previous_asks.pop(0) ask = received_asks.pop(0) # print(ask) if Decimal(ask[1]) > Decimal(0): # print("appended") new_asks.append(ask) # print("After Merge") # pprint(new_asks) if len(previous_asks) > 0: new_asks.extend(previous_asks) elif len(received_asks) > 0: new_asks.extend(received_asks) # print("Complete") # pprint(new_asks) return new_asks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_answers(conn, cur, answers):\n \n print 'Adding answers...',\n \n for i, answer in enumerate(answers):\n cur.execute('INSERT INTO answers VALUES (\"{_id}\", \"{task_id}\", \"{text}\")'.format(\n _id = i+1,\n task_id = answer['task_id'],\n ...
[ "0.5846932", "0.5842225", "0.5782144", "0.5523276", "0.5449554", "0.53335917", "0.5256855", "0.5231915", "0.5231915", "0.51975715", "0.5142276", "0.51192087", "0.50733495", "0.5046175", "0.5029141", "0.49793863", "0.4966205", "0.49656373", "0.49622992", "0.49538177", "0.48968...
0.7092049
0
Inserts multiple new bids in the order book (assumes that the order book AND the new_bids list are sorted)
def insertBids(previous_bids, received_bids): new_bids = [] while len(previous_bids) > 0 and len(received_bids) > 0: bid = None if Decimal(previous_bids[0][0]) > Decimal(received_bids[0][0]): bid = previous_bids.pop(0) elif Decimal(previous_bids[0][0]) < Decimal(received_bids[0][0]): bid = received_bids.pop(0) else: previous_bids.pop(0) bid = received_bids.pop(0) if Decimal(bid[1]) > Decimal(0): new_bids.append(bid) if len(previous_bids) > 0: new_bids.extend(previous_bids) elif len(received_bids) > 0: new_bids.extend(received_bids) return new_bids
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_new_bids(self):\n for bidder in self._bidders:\n if bidder != self._highest_current_bidder:\n bid_price = bidder(self)\n if bid_price > self.current_bid:\n self.update_bid(bid_price, bidder)", "def add_boid(self, new_boid):\r\n s...
[ "0.6216382", "0.6018596", "0.5861003", "0.57200617", "0.5687406", "0.5627169", "0.5626909", "0.5589654", "0.5553824", "0.5513555", "0.54484546", "0.544297", "0.5307633", "0.52903914", "0.5283958", "0.5282783", "0.5269406", "0.52541256", "0.5221003", "0.51885104", "0.51821554"...
0.72524697
0
Calculate the straight line distance between two gps coordinates
def dist(coord_a, coord_b): lat1 = math.radians(coord_a[0]) lat2 = math.radians(coord_a[0]) lng1 = math.radians(coord_a[1]) lng2 = math.radians(coord_b[1]) # Haversine formula dlat = lat2 - lat1 dlng = lng2 - lng1 a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlng / 2)**2 distance_radians = 2 * math.asin(math.sqrt(a)) # Radius of earth in meters. r_earth = 6371000 # error in reading val_error = 1.7 # calculate the distane in meters return (distance_radians * r_earth)* val_error
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distance(gps1, gps2):\n return haversine(gps1.lng, gps1.lat, gps2.lng, gps2.lat)", "def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']", "def calculate_line_length(x1, y1, x2, y2):\n distance = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)\n...
[ "0.7674099", "0.7567361", "0.7443959", "0.7391137", "0.7256764", "0.7159113", "0.7153204", "0.71042436", "0.70898855", "0.702371", "0.7000356", "0.69934964", "0.69846", "0.6979073", "0.6970385", "0.6955696", "0.69483423", "0.69429255", "0.6940593", "0.6924824", "0.6917241", ...
0.0
-1
Populate paths with lengths in database
def postPathLengths(map_area): paths = Path.query.filter(Path.map_area==map_area).all() for path in paths: start = Node.query.filter_by(id=path.start).first() end = Node.query.filter_by(id=path.end).first() # get the length of the two side of the paths avg_length = calculatePathLength(start, end) path.length = avg_length print(avg_length) # save the changes to the database db.session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculateWireLenght(path_list):\n\n total_length = 0\n for path in path_list:\n total_length += len(path)\n return total_length", "def path_length(self,path,num_repeats=10):\n begin_time=datetime.datetime.now()\n #num_repeats=100\n for i in range(num_repeats):\n ...
[ "0.58111256", "0.58005226", "0.5792587", "0.54623866", "0.5386073", "0.5357159", "0.5356317", "0.5353135", "0.5353135", "0.53270024", "0.53158593", "0.5290157", "0.52883023", "0.52871025", "0.5284866", "0.5262351", "0.52564347", "0.5213889", "0.5204303", "0.5164193", "0.51576...
0.6926739
0
Return true if x is in the range [start, end]
def time_in_range(start, end, time): if start <= end: return start <= time <= end else: return start <= time or time <= end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_range(start, end, x):\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end", "def date_in_range(start, end, x):\n\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end", "def in_range(x, a, b):\n return (x >=...
[ "0.8966769", "0.7992362", "0.77772695", "0.7776177", "0.766966", "0.74990565", "0.74736047", "0.72713387", "0.72193664", "0.71821845", "0.7163457", "0.7140041", "0.7109383", "0.70654154", "0.7048866", "0.70405066", "0.7038097", "0.7010097", "0.6977196", "0.6975511", "0.696325...
0.6845803
26
Loggin In With Facebook
def login(): authorization_url, state = facebook.authorization_url(authorization_base_url) print 'Please authorize', authorization_url return redirect(authorization_url, code=302)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def facebook_login():\n if not facebook.authorized:\n return redirect(url_for('facebook.login'))\n account_info = facebook.get('me?fields=id,name,email')\n# print(account_info)\n if account_info.ok:\n account_info_json = account_info.json()\n user = {}\n user['email'] = acco...
[ "0.7702685", "0.7581268", "0.7554234", "0.75297165", "0.7491286", "0.7259731", "0.7058176", "0.6954533", "0.6849566", "0.6809354", "0.68011945", "0.6796922", "0.679441", "0.6732429", "0.66742224", "0.6617167", "0.65538794", "0.65296173", "0.6522063", "0.6515471", "0.64445364"...
0.7205264
6
Walk through all files located under a root path.
def iter_files(path): for dirpath, _, filenames in os.walk(path): for f in filenames: if f != '.DS_Store': yield os.path.join(dirpath, f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index_all_files(self, root_dir):\n pass", "def files_from_root(root, accept):\n for (dir_path, _dir_names, file_names) in os.walk(root, followlinks=True):\n for file_name in file_names:\n if accept(file_name):\n path = os.path.join(dir_path, file_name)\n ...
[ "0.7381147", "0.72097903", "0.7066387", "0.7058464", "0.7056831", "0.6993204", "0.6956753", "0.69543415", "0.6945345", "0.68726885", "0.6805766", "0.67303014", "0.6628771", "0.66223794", "0.6598162", "0.65677375", "0.65590584", "0.6550217", "0.65464854", "0.6530529", "0.65129...
0.59631664
81
Create the matrices from the datasets.
def prepare_data(seqs, labels, maxlen=None, xdim=3): # Trim all output seqs to have only maxlen steps if maxlen is not None: Iseqs = [] Oseqs = [] for i_seq, o_seq in zip(seqs, labels): if len(o_seq) < maxlen: Iseqs.append(i_seq) Oseqs.append(o_seq) seqs = Iseqs labels = Oseqs else: maxlen = 40 # Pad and compute masks ret_X = np.zeros((maxlen, len(seqs), xdim)) mask_X = np.zeros((maxlen, len(seqs))) labels_X = np.zeros((maxlen, len(seqs))) for k in range(len(seqs)): mask_X[:len(labels[k]), k] = 1 ret_X[:len(labels[k]), k] = np.asarray(seqs[k]) labels_X[:len(labels[k]), k] = labels[k] return ret_X, mask_X, labels_X
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()", "def matrices(self):\n return [ self.__class__(labels=self.labels,\n labels_map=self.labels_map,\n sets=[x]) for x in self.sets]", "d...
[ "0.6748821", "0.6716942", "0.6337572", "0.63359183", "0.62707675", "0.6173715", "0.6111475", "0.60522753", "0.6028459", "0.6023709", "0.6017404", "0.5932755", "0.58969504", "0.5868365", "0.58672017", "0.5865364", "0.58570284", "0.585554", "0.5840747", "0.5836187", "0.583551",...
0.0
-1
Optimize a neural network that classifies MNIST data.
def sgd_experiment(): batch_size = 128 layer_1_hidden_nodes = 80 ## Starting small so my computer can keep up with the ram requirements of LEEA :) (train_dataset, train_labels), (valid_dataset, valid_labels), (test_dataset, test_labels) = get_mnist() graph = tf.Graph() with graph.as_default(): ## Data variables. tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size)) tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels)) tf_valid_dataset = tf.constant(valid_dataset) tf_test_dataset = tf.constant(test_dataset) ## Weights describing single layer. weights1 = tf.Variable( tf.truncated_normal([image_size * image_size, layer_1_hidden_nodes]) ) biases1 = tf.Variable(tf.zeros([layer_1_hidden_nodes])) weights2 = tf.Variable( tf.truncated_normal([layer_1_hidden_nodes, num_labels]) ) biases2 = tf.Variable(tf.zeros([num_labels])) ## Training variables. lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1) logits = tf.matmul(lay1_train, weights2) + biases2 loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf_train_labels) ) optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss) train_prediction = tf.nn.softmax(logits) lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1) valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2) lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1) test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2) num_steps = 3001 with tf.Session(graph=graph) as session: tf.global_variables_initializer().run() for step in range(num_steps): offset = (step * batch_size) % (train_labels.shape[0] - batch_size) batch_data = train_dataset[offset:(offset + batch_size), :] batch_labels = train_labels[offset:(offset + batch_size), :] feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels} _, l, predictions = session.run( [optimizer, loss, train_prediction], 
feed_dict=feed_dict) if (step % 250) == 0: print("Minibatch loss at step %d: %f" % (step, l)) print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels)) print("Validation accuracy: %.1f%%" % accuracy( valid_prediction.eval(), valid_labels) ) print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,\r\n dataset='mnist.pkl.gz',\r\n batch_size=600):\r\n datasets = load_data(dataset)\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x, ...
[ "0.68616706", "0.67666817", "0.639884", "0.62000936", "0.6169988", "0.6159703", "0.6147306", "0.608073", "0.60804075", "0.6076149", "0.6037147", "0.6005561", "0.6005367", "0.59805745", "0.59763664", "0.59754896", "0.5955019", "0.5887888", "0.58547395", "0.58541465", "0.584930...
0.0
-1
Optimize a neural network with LEEA and the MNIST dataset.
def leea_experiment(): layer_1_hidden_nodes = 80 ## Starting small so my computer can keep up with the ram requirements of LEEA :) (train_dataset, train_labels), (valid_dataset, valid_labels), (test_dataset, test_labels) = get_mnist() ## Copy pasted, I'm so sorry. graph = tf.Graph() with graph.as_default(): ## Data variables. tf_train_dataset = tf.placeholder(tf.float32, shape=(Params.SAMPLE_COUNT, image_size * image_size)) tf_train_labels = tf.placeholder(tf.float32, shape=(Params.SAMPLE_COUNT, num_labels)) tf_valid_dataset = tf.constant(valid_dataset) tf_test_dataset = tf.constant(test_dataset) ## Weights describing single layer. weights1 = tf.Variable( tf.truncated_normal([image_size * image_size, layer_1_hidden_nodes]) ) biases1 = tf.Variable(tf.zeros([layer_1_hidden_nodes])) weights2 = tf.Variable( tf.truncated_normal([layer_1_hidden_nodes, num_labels]) ) biases2 = tf.Variable(tf.zeros([num_labels])) ## Training variables. lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1) logits = tf.matmul(lay1_train, weights2) + biases2 loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf_train_labels) ) optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss) train_prediction = tf.nn.softmax(logits) lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1) valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2) lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1) test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2) with tf.Session(graph=graph) as session: op = tf.variables_initializer(tf.trainable_variables()) session.run(op) evaluator = Evaluator(session, loss, train_prediction) ## Take in the loss as a function (rather than TF operation). 
evolver = Evolver(tf.trainable_variables(), evaluator) for gen in range(Params.MAX_GENERATIONS): print("Generation: ", gen) offset = (gen * Params.SAMPLE_COUNT) % (train_labels.shape[0] - Params.SAMPLE_COUNT) batch_data = train_dataset[offset:(offset + Params.SAMPLE_COUNT), :] batch_labels = train_labels[offset:(offset + Params.SAMPLE_COUNT), :] evolver.doGeneration({tf_train_dataset: batch_data, tf_train_labels: batch_labels}) best = evolver.getBest() evolver.restore_variables(evolver.variables, session, *evolver.unflatten_tensors(best.weights, evolver.variables)) print("Minimum achieved loss: %f" % (-1 * best.fitness)) print("Validation accuracy: %.1f%%" % accuracy(valid_prediction.eval(), valid_labels)) print("Test accuracy: %f%%" % accuracy(test_prediction.eval(), test_labels))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrod...
[ "0.6583897", "0.65585095", "0.65471804", "0.6538894", "0.6405767", "0.6403417", "0.6328416", "0.63117754", "0.6307124", "0.6301749", "0.62865406", "0.62524986", "0.62325215", "0.61932445", "0.61917716", "0.6189975", "0.61607337", "0.6145277", "0.6128466", "0.61265934", "0.611...
0.6448221
4
Create a database according to schema in JSON format.
def create_db(db, schema_json): with open(schema_json) as of: schema = json.load(of, object_pairs_hook=OrderedDict) # OrderedDict so that tables are created in the order specified, # allowing foreign keys to reference previously defined tables for table_name, columns in schema.items(): col_types = columns.items() # dict -> tuple make_table(db, table_name, col_types)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(db_path, schema_json):\n create_db(db_path, schema_json)", "def create_db():\n db.create_all()\n print('Database structure created successfully')", "def create_db():\n db.create_all()\n print(\"DB Created\")", "def create_schema(db_name, schema_name):\n # 1. Connect to database\n ...
[ "0.82075125", "0.7394558", "0.7111433", "0.7067049", "0.70018756", "0.7000087", "0.6994893", "0.6987741", "0.696631", "0.6925411", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69109344", "0.69...
0.81120163
1
Create a database from a schema and populate it with CSV/JSON data.
def main(db_path, schema_json): create_db(db_path, schema_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_db(db, schema_json):\n with open(schema_json) as of:\n schema = json.load(of, object_pairs_hook=OrderedDict)\n # OrderedDict so that tables are created in the order specified,\n # allowing foreign keys to reference previously defined tables\n\n for table_name, columns in schem...
[ "0.7358652", "0.6983746", "0.6980258", "0.6947382", "0.68650365", "0.6854215", "0.68521404", "0.68436974", "0.6828379", "0.678138", "0.67692417", "0.67509025", "0.67440456", "0.67321527", "0.67321527", "0.67321527", "0.67321527", "0.67321527", "0.67321527", "0.67321527", "0.6...
0.7520764
0
Fails if log handler does not output expected log message
def test_sqs_log_handler_basic_json_format(self): try: self.logger.info("test info message") body = self.retrieve_message() expected = ("""{"asctime": "2016-01-01 00:00:00,000",""" """ "levelname": "INFO", "message": "test info message"}""") except BaseException as err: self.fail("Should not raise exception, got {} instead".format(err)) self.assertEqual(body, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log(self, message):\n pass", "def log(self, message):", "def _stab_log_error(self, logconf, msg):\n\t\tprint \"Error when logging %s: %s\" % (logconf.name, msg)", "def do_handle_log(self, workunit, level, *msg_elements):\r\n pass", "def log(self, message: str):", "def _stab_log_error(self,...
[ "0.6798006", "0.6634195", "0.65810263", "0.65154", "0.64977425", "0.6463785", "0.64605075", "0.6457076", "0.64166117", "0.6404903", "0.6383126", "0.63817513", "0.6376192", "0.636835", "0.63453645", "0.63350356", "0.63286597", "0.6290296", "0.6282357", "0.62702614", "0.6269678...
0.0
-1
Fails if log handler does not output expected log message with extras
def test_sqs_log_handler_extra_json_data(self): try: extra = { "test": "test logging", "num": 1, 5: "9", "float": 1.75, "nested": {"more": "data"} } self.logger.info("test info message with properties", extra=extra) body = self.retrieve_message() expected = ("""{"asctime": "2016-01-01 00:00:00,000", "levelname": "INFO",""" """ "message": "test info message with properties",""" """ "5": "9", "float": 1.75, "num": 1,""" """ "test": "test logging", "nested": {"more": "data"}}""") except BaseException as err: self.fail("Should not raise exception, got {} instead".format(err)) self.assertEqual(body, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_extra(self, msg, *args):\n if self.settings.EXTRA_LOGGING.value:\n self.log.info(\"(Extra logging) \" + msg, *args)", "def log_message(self, format, *args):", "def _log(self, message):\n pass", "def log(self, message):", "def do_handle_log(self, workunit, level, *msg_elemen...
[ "0.6871636", "0.6594983", "0.65777576", "0.64990383", "0.6497", "0.6418214", "0.63011897", "0.6265279", "0.62583315", "0.6248939", "0.6216916", "0.61940885", "0.61799824", "0.6179446", "0.6169949", "0.6157681", "0.61575145", "0.61328405", "0.6126881", "0.6124526", "0.60975176...
0.5951401
37
Fails if log handler does not output expected log message at error level
def test_sqs_log_handler_error(self): try: extra = { "test": "test logging", "num": 1, 5: "9", "float": 1.75, "nested": {"more": "data"} } self.logger.error("test info message with properties", extra=extra) body = self.retrieve_message() expected = ("""{"asctime": "2016-01-01 00:00:00,000", "levelname": "ERROR",""" """ "message": "test info message with properties",""" """ "5": "9", "float": 1.75, "num": 1,""" """ "test": "test logging", "nested": {"more": "data"}}""") except BaseException as err: self.fail("Should not raise exception, got {} instead".format(err)) self.assertEqual(body, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error(self, msg, *args):\n if self.lvl<=logging.ERROR: return self._log(msg, *args)", "def _stab_log_error(self, logconf, msg):\n\t\tprint \"Error when logging %s: %s\" % (logconf.name, msg)", "def _stab_log_error(self, logconf, msg):\n print('Error when logging %s: %s' % (logconf.name, msg))...
[ "0.75329125", "0.74146676", "0.73945475", "0.718149", "0.71730816", "0.71730816", "0.7140554", "0.71289593", "0.7127636", "0.7127636", "0.7125438", "0.7094133", "0.7039443", "0.70359004", "0.70328724", "0.70328724", "0.70328724", "0.70328724", "0.70328724", "0.70328724", "0.7...
0.6979829
27
Fails if log handler does not output expected log message at error level
def test_sqs_log_global_extra(self): try: extra = { "test": "test logging", "num": 1, 5: "9", "float": 1.75, "nested": {"more": "data"} } global_extra = { "cluster_name": "regression", "node_name": "localhost", } log_handler = sqsloghandler.SQSHandler(self.log_queue_name, global_extra=global_extra) formatter = jsonlogger.JsonFormatter('%(asctime) %(levelname) %(message)') log_handler.setFormatter(formatter) logger = logging.getLogger(TestSQSLogHandler.__name__ + "global-extra") logger.addHandler(log_handler) logger.error("test info message with properties", extra=extra) body = self.retrieve_message() expected = ("""{"asctime": "2016-01-01 00:00:00,000", "levelname": "ERROR",""" """ "message": "test info message with properties", "5": "9", """ """"float": 1.75, "num": 1, "cluster_name": "regression",""" """ "test": "test logging", "nested": {"more": "data"}, "node_name": "localhost"}""") except BaseException as err: self.fail("Should not raise exception, got {} instead".format(err)) self.assertEqual(body, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error(self, msg, *args):\n if self.lvl<=logging.ERROR: return self._log(msg, *args)", "def _stab_log_error(self, logconf, msg):\n\t\tprint \"Error when logging %s: %s\" % (logconf.name, msg)", "def _stab_log_error(self, logconf, msg):\n print('Error when logging %s: %s' % (logconf.name, msg))...
[ "0.75332403", "0.7415585", "0.7395565", "0.7182279", "0.71737486", "0.71737486", "0.7142053", "0.7128932", "0.7128264", "0.7128264", "0.71270907", "0.7094948", "0.70400184", "0.7036283", "0.70337623", "0.70337623", "0.70337623", "0.70337623", "0.70337623", "0.70337623", "0.70...
0.0
-1
Traverse a tree rooted at in postorder
def leaves(node, res): leaf = True if node.lesser: leaf = False leaves(node.lesser, res) if node.greater: leaf = False leaves(node.greater, res) if leaf: res.append(node.indices)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def traverse_postorder(self, root):\n if root is not None:\n self.traverse_postorder(root.left)\n self.traverse_postorder(root.right)\n print(root.data)", "def postorder(root):\n if not root:\n return\n inorder(root.left)\n inorder(root.right)\n print(ro...
[ "0.81746197", "0.7900327", "0.77919316", "0.76695967", "0.7652187", "0.76341856", "0.75693804", "0.7548271", "0.75037867", "0.74601656", "0.7442373", "0.7406432", "0.73614275", "0.73541087", "0.7322688", "0.7294151", "0.72874355", "0.7265556", "0.7239447", "0.7193696", "0.716...
0.0
-1
Create a recursive dictionary mapping of a directory structure starting at pathname A leaf is filename>full path of file
def treedir(pathname): (t, d, f) = next(os.walk(pathname)) return {**{_dir : treedir(os.path.join(pathname, _dir)) for _dir in d}, **{file:os.path.join(t, file) for file in f}}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def path_to_dict(self, someDir, level=9001, relativeFolders=True, relativeFiles=False):\n someDir = someDir.rstrip(os.path.sep)\n assert os.path.isdir(someDir)\n numSep = someDir.count(os.path.sep)\n\n outputDict = {}\n for root, dirs, files in os.walk(someDir):\n for ...
[ "0.71291417", "0.70161325", "0.67469096", "0.6707382", "0.6544313", "0.6477862", "0.64685744", "0.63074756", "0.6306378", "0.62825865", "0.6236606", "0.61944944", "0.61629915", "0.6132351", "0.6112128", "0.6096042", "0.6063349", "0.60022354", "0.59963495", "0.5958212", "0.593...
0.7011454
2
Map x,y coordinates to pixel size
def topix(x, y, pxsz): assert (x >= 0) assert (y >= 0) return np.array([int(round(x / pxsz)), int(round(y / pxsz))], dtype=np.uint)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPixelSize(self):\n return (0.000013, 0.000013)", "def GetPixelDimensions(self):\n _, width, _, _, _, height = self.GetGeoTransform()\n return XY(x=width, y=height)", "def px_size(self):\n xp, yp = ct.c_float(), ct.c_float()\n\n self.lib.GetPixelSize(ct.pointer(xp), ct....
[ "0.7402808", "0.73856735", "0.73046285", "0.7287127", "0.7168122", "0.7099318", "0.69940174", "0.6974272", "0.6839358", "0.68056744", "0.6799161", "0.6775825", "0.6729077", "0.67045385", "0.6631168", "0.6580784", "0.65413487", "0.653211", "0.64739186", "0.64416355", "0.643086...
0.0
-1
Compute Local Effective Resolution
def computerecondensity(d3d, label, leafs=16, PIX=10, IMGMAX=40000): points = d3d[:, :2].copy() mx, MX = np.min(points[:, 0]), np.max(points[:, 0]) my, MY = np.min(points[:, 1]), np.max(points[:, 1]) if mx < 0: points[:, 0] += np.abs(mx) print("Neg pos for {}".format(label)) if my < 0: points[:, 1] += np.abs(my) print("Neg pos for {}".format(label)) tr = cKDTree(points, leafsize=leafs) lfs = [] root = tr.tree leaves(root, lfs) assert (MX < IMGMAX) assert (MY < IMGMAX) pixels = np.empty((len(lfs), 2), dtype=np.float64) imgsize = int(np.round(IMGMAX / PIX)) imarray = np.zeros((imgsize, imgsize), dtype=np.float32) for ip, p in enumerate(lfs): ps = points[p] # assert(ps.shape[0] <= leafs) assert (ps.shape[1] == 2) _mx = min(ps[:, 0]) _Mx = max(ps[:, 0]) _my = min(ps[:, 1]) _My = max(ps[:, 1]) _x, _y = topix(_mx, _my, PIX) # Lower left _X, _Y = topix(_Mx, _My, PIX) # Upper right # ELR = nr of points over area xy - XY # imarray[_x:_X, _y:_Y] += ps.shape[0] pixels[ip, 0] = ((_X - _x) + 1) * ((_Y - _y) + 1) # Superpixel area pixels[ip, 1] = ps.shape[0] / pixels[ip, 0] # Locs / px^2 for pri in range(ps.shape[0]): pti = ps[pri, :2] xp, yp = topix(pti[0], pti[1], PIX) assert (xp < IMGMAX) assert ( yp < IMGMAX) # You'd assume this check is not nec. as an index error would follow, but you'd be wrong in interesting cases try: imarray[xp, yp] += pixels[ip, 1] except OverflowError as e: print("OE {} , {} <- {} {}".format(xp, yp, pixels[ip, 1], pixels[ip, 0])) print("OE {} , {} <- {:.2f} {:.2f}".format(xp, yp, pti[0], pti[1])) raise return tr, imarray, pixels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through...
[ "0.64042413", "0.5862268", "0.5787392", "0.5745422", "0.5741281", "0.5721242", "0.56675434", "0.566455", "0.5620853", "0.55921644", "0.5583884", "0.5534082", "0.5525383", "0.55130315", "0.5511572", "0.5489824", "0.54889446", "0.5474182", "0.54559463", "0.5435168", "0.5424118"...
0.0
-1
Compute the local effective resolution for data
def computeSNRLE(leafs=2, pix=10, MAX=60000, data=None, outpath="."): lgr.info("Computing LRE for leaf size {}, {} nm/pixel".format(leafs, pix)) if data is None: return pixelmap ={} SNR = np.sqrt(leafs/2) for cell in data: lgr.info("Cell {}".format(cell)) for channel in data[cell]: lgr.info("Channel {}".format(channel)) d3d = data[cell][channel].points label = "{}_{}".format(cell, channel) tr, imarray, pixels = computerecondensity(d3d, label, leafs, pix, MAX) img = Image.fromarray((imarray/np.max(imarray)*255).astype(np.uint8), mode='L') img.save(os.path.join(outpath, '{}_oct_{:.2f}.tiff'.format(label, SNR))) # sns.distplot(pixels[:,1]) pixelmap[label] = pixels, imarray return pixelmap
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through...
[ "0.6412861", "0.6363401", "0.6193123", "0.618786", "0.6109423", "0.59960175", "0.5980494", "0.5975609", "0.58518046", "0.5812452", "0.5804847", "0.57916963", "0.574673", "0.57214296", "0.56793344", "0.567626", "0.5666228", "0.56486475", "0.56465364", "0.564494", "0.56279874",...
0.0
-1
verify that, once send() is called, a tenant has been setup
def test_tenant_setup_celery(self): class interceptor(mock.Mock): tenant = None def send(self, *kw, **args): self.tenant = properties.tenant msg = interceptor() tenant = mock.Mock() tenant.client_name = 'mock-tenant' _send_celery_mail(msg, tenant, send=True) self.assertTrue(msg.tenant is tenant)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_tenant_setup_celery_reset(self):\n msg = mock.Mock()\n tenant = mock.Mock()\n tenant.client_name = 'mock-tenant'\n\n _send_celery_mail(msg, tenant, send=False)\n\n self.assertFalse(hasattr(properties, 'tenant'))\n self.assertEqual(properties.tenant_properties, {})...
[ "0.6928082", "0.60256696", "0.5895501", "0.5891009", "0.5843694", "0.57787097", "0.577437", "0.56721795", "0.56186384", "0.55741477", "0.5572975", "0.557291", "0.55664927", "0.5555025", "0.54957616", "0.54935724", "0.5484652", "0.5469827", "0.54646283", "0.5463046", "0.544083...
0.7089597
0
after _send_celery_mail finishes, the tenant should be cleared again
def test_tenant_setup_celery_reset(self): msg = mock.Mock() tenant = mock.Mock() tenant.client_name = 'mock-tenant' _send_celery_mail(msg, tenant, send=False) self.assertFalse(hasattr(properties, 'tenant')) self.assertEqual(properties.tenant_properties, {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_tenant_setup_celery(self):\n\n class interceptor(mock.Mock):\n tenant = None\n\n def send(self, *kw, **args):\n self.tenant = properties.tenant\n\n msg = interceptor()\n tenant = mock.Mock()\n tenant.client_name = 'mock-tenant'\n\n _s...
[ "0.60660076", "0.5897273", "0.5827525", "0.58136255", "0.57590765", "0.57500565", "0.57095784", "0.56320375", "0.5629329", "0.56141365", "0.560764", "0.55990505", "0.55975515", "0.5582091", "0.5547078", "0.5544799", "0.55407256", "0.5533396", "0.5488413", "0.54429895", "0.540...
0.7174306
0
Process a prepared payment
def process(self): result = self.processor.process(self.request) self.processorResults = result.success if result.payment: reason_code = result.payment.reason_code else: reason_code = "" self.processorReasonCode = reason_code self.processorMessage = result.message log.info("""Processing %s transaction with %s Order %i Results=%s Response=%s Reason=%s""", self.paymentModule.LABEL.value, self.paymentModule.KEY.value, self.order.id, self.processorResults, self.processorReasonCode, self.processorMessage) return self.processorResults
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_payment():\n\n url = 'https://api.worldpay.com/v1/orders'\n headers = {'Authorization': environ.get('WORLDPAY_API_KEY'),\n 'Content-type': 'application/json'}\n body = {\n \"paymentMethod\": {\n \"type\": \"Card\",\n \"name\": session['caller_name'],\...
[ "0.71470153", "0.6825788", "0.6774326", "0.6768605", "0.66278535", "0.6614318", "0.65593857", "0.651435", "0.6356622", "0.63481003", "0.6293071", "0.6278738", "0.6272433", "0.6199742", "0.61738306", "0.61488736", "0.6109061", "0.6077295", "0.6046448", "0.6045506", "0.6034854"...
0.6704594
4
Handles a success in payment. If the order is paidoff, sends success, else return page to pay remaining.
def _onSuccess(self, controller): if controller.order.paid_in_full: controller.cart.empty() for item in controller.order.orderitem_set.all(): if item.product.is_subscription: item.completed = True item.save() try: curr_status = controller.order.orderstatus_set.latest() except OrderStatus.DoesNotExist: curr_status = None if (curr_status is None) or (curr_status.notes and curr_status.status == "New"): controller.order.add_status(status='New', notes = "Order successfully submitted") else: # otherwise just update and save if not curr_status.notes: curr_status.notes = _("Order successfully submitted") curr_status.save() #Redirect to the success page url = controller.lookup_url('satchmo_checkout-success') return HttpResponseRedirect(url) else: log.debug('Order #%i not paid in full, sending to pay rest of balance', controller.order.id) #url = controller.order.get_balance_remaining_url() url = reverse('satchmo_balance_remaining') return HttpResponseRedirect(url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ui_redirect_success(self, order: Order = None) -> HttpResponse:\n ui_return_url = self.extract_ui_return_url()\n if ui_return_url:\n return self._redirect_to_ui(\n ui_return_url, \"success\", order, path=\"/payment-result\"\n )\n else:\n retu...
[ "0.7352169", "0.72179747", "0.7183236", "0.7149302", "0.70583147", "0.700066", "0.69343185", "0.68865967", "0.6824747", "0.68121576", "0.6804239", "0.6716359", "0.6692291", "0.661997", "0.6476802", "0.6473549", "0.6418588", "0.6369197", "0.63250667", "0.6308499", "0.6287491",...
0.74831283
0
| Ground Truth | Forehand Backhand Serve Forehand num_FF num_BF num_SF Backhand num_FB num_BB num_SB Serve num_FS num_BS num_SS No_action num_FN num_BN num_SN
def show_eval_class_level(num_BB, num_BF, num_BS, num_BN, num_FB, num_FF, num_FS, num_FN, num_SB, num_SF, num_SS, num_SN): print("************************************************") print(" | Ground Truth | ") print(" Forehand Backhand Serve ") print("Forehand " + str(num_FF) + " " + str(num_BF) + " " + str(num_SF)) print("Backhand " + str(num_FB) + " " + str(num_BB) + " " + str(num_SB)) print("Serve " + str(num_FS) + " " + str(num_BS) + " " + str(num_SS)) print("Noaction " + str(num_FN) + " " + str(num_BN) + " " + str(num_SN)) print("************************************************")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handDecision(handIn):", "def part1b_2():\n xs = exampleInput\n z = 5.881\n forward = [\n Counter({'-FEAT-': 0.622, '-SIZE-': 0.377}), \n Counter({'-SIZE-': 0.761, '-FEAT-': 0.238}), \n Counter({'-SIZE-': 0.741, '-FEAT-': 0.258})]\n \n z_, forward_ = submission....
[ "0.58027256", "0.5786682", "0.5755825", "0.5679587", "0.56194", "0.56090933", "0.5567905", "0.5535476", "0.5521466", "0.54943043", "0.5431216", "0.5410346", "0.5410346", "0.54039884", "0.53975636", "0.53964627", "0.53595126", "0.5350653", "0.5346474", "0.53431875", "0.5331558...
0.58535993
0
This bidirectional stream makes it possible to send and receive Notes between 2 persons
def ReceiveMsg(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def outgoing(self,message,isBinary=False,identifier=None):\n pass", "def bidi_streaming(self) -> global___Snippet.BidiStreaming:", "def play_protobuf(self, notes):\n with open_output(self.midi_port) as outport:\n\n outport.send(Message('program_change', program=12))\n \n ...
[ "0.5810446", "0.5711371", "0.55302083", "0.54603386", "0.5386901", "0.5341167", "0.5337179", "0.52391213", "0.52031493", "0.51937705", "0.5192062", "0.518014", "0.51069796", "0.50645936", "0.50393647", "0.50284076", "0.50284076", "0.50284076", "0.49854276", "0.49460483", "0.4...
0.0
-1
This bidirectional stream makes it possible to send and receive Notes between 2 persons
def AddUser(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def outgoing(self,message,isBinary=False,identifier=None):\n pass", "def bidi_streaming(self) -> global___Snippet.BidiStreaming:", "def play_protobuf(self, notes):\n with open_output(self.midi_port) as outport:\n\n outport.send(Message('program_change', program=12))\n \n ...
[ "0.58094895", "0.5711503", "0.55307317", "0.54599893", "0.5386648", "0.53406537", "0.5336743", "0.52399313", "0.5202152", "0.51940846", "0.5191432", "0.51793194", "0.510636", "0.50639737", "0.5037723", "0.5028408", "0.5028408", "0.5028408", "0.49852347", "0.49451035", "0.4945...
0.0
-1
Decoding Key Value Json String
def loads(kv_data): dict_kv = {} if isinstance(kv_data, str): kvs = json.loads(kv_data) for kv in kvs: dict_kv[kv['Key']] = kv['Value'] else: print("To load Key Value Data it must be String Type") return dict_kv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def json_decode(value):\r\n return json.loads(to_basestring(value))", "def JsonDecode(json_str):\n return JSON_DECODER.decode(json_str)", "def json_decode(json_str):\n return JSON_DECODER.decode(json_str)", "def _decode_key(self, key):\n return key if not key or isinstance(key, str) else key.de...
[ "0.7216333", "0.671068", "0.66782916", "0.6641796", "0.6561169", "0.6467437", "0.64546573", "0.63418275", "0.6303222", "0.6267742", "0.6266026", "0.6266026", "0.6258209", "0.625522", "0.62400687", "0.62128145", "0.620793", "0.62034374", "0.61571765", "0.6144871", "0.6067123",...
0.55143964
70
Remove Key from a Key Value pair Can be performed on Dictionary or Json key value string
def remove(kv_data, key): if isinstance(kv_data, str): kv_data = loads(kv_data) # Turn into Dictionary try: del kv_data[key] except NameError: print(key, " does not exists in key value pair.") kv_data = dumps(kv_data) else: print("Provide a Json Key Value String") sys.exit(6) return kv_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_value(self, thing_key, dkey):\n if thing_key in self.things_dict:\n dic = self.things_dict[thing_key]\n if type(dic) != type({}):\n return\n dic.pop(dkey, None)", "def remove_value(self, key: str) -> None:\n raise NotImplementedError", "d...
[ "0.7145345", "0.7130893", "0.7018091", "0.6990667", "0.6903429", "0.6875397", "0.6862628", "0.6806682", "0.6771876", "0.6739055", "0.67074156", "0.6704598", "0.6693101", "0.6646352", "0.6637673", "0.66145486", "0.6510989", "0.6470834", "0.64574033", "0.64387095", "0.6428868",...
0.8426328
0
This will add a key, if key already exists it will overwrite
def add(kv_data, key, value): if isinstance(kv_data, str): kvs = loads(kv_data) # Turn json into dictionary kvs[key] = value # Add key kvs = dumps(kvs) # Turn back into json Key Value String else: print("Provide A JSON Key Value String") return kvs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, key, value):\n if not key in self:\n self.keys.append(key)\n self.dict[key] = value", "def add(self, key, value):", "def _newKey(self, key):\n self._testKeySubNsAdd()\n self._getKeyList().append(key)", "def addKey(self, key, val):\n self.dict[ke...
[ "0.76844144", "0.7508933", "0.7368967", "0.7365639", "0.72025496", "0.7181988", "0.7153343", "0.7138765", "0.7047213", "0.70134836", "0.6962179", "0.6927866", "0.69101334", "0.6902198", "0.6863071", "0.68489337", "0.6820259", "0.67976147", "0.67973626", "0.67802364", "0.67700...
0.0
-1
If JSON Key Value, Value contains this value
def contains_value(kv_json, value): if isinstance(kv_json, str): kv_dict = loads(kv_json) for key in kv_dict: if kv_dict[key] == value: # Found value in dictionary return True return False else: print("Provide A JSON Key Value String")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __is_key_in_json(self, key=str, json_dict=json):\n if key in json_dict:\n # noinspection PyUnresolvedReferences\n return json_dict[key]\n else:\n return self.NO_KEY_VALUE_FOR_ENTRY", "def is_item_in_the_response(key, value, jsonResponse):\n flag = False\n ...
[ "0.71973366", "0.68866235", "0.66816956", "0.64885634", "0.6401842", "0.6400266", "0.6142757", "0.61393195", "0.60865045", "0.60471123", "0.60368866", "0.60345954", "0.6024064", "0.6017378", "0.5997288", "0.59498996", "0.59224445", "0.58714145", "0.5839041", "0.58378595", "0....
0.71492994
1
If the JSON Key Value Pair conatins a specific key
def contains_key(kv_json, key): if isinstance(kv_json, str): kv_dict = loads(kv_json) try: res = kv_dict[key] return True except KeyError: return False else: print("Provide A JSON Key Value String")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __is_key_in_json(self, key=str, json_dict=json):\n if key in json_dict:\n # noinspection PyUnresolvedReferences\n return json_dict[key]\n else:\n return self.NO_KEY_VALUE_FOR_ENTRY", "def contains_value(kv_json, value):\n if isinstance(kv_json, str):\n ...
[ "0.74435276", "0.7416881", "0.71410507", "0.70549554", "0.65186876", "0.6266555", "0.6243399", "0.6222261", "0.62155443", "0.6150396", "0.61433333", "0.6076848", "0.60425204", "0.603984", "0.5970664", "0.5963968", "0.5870145", "0.5867332", "0.5800207", "0.57809585", "0.577749...
0.73385584
2
Checks if the JSON Key Value String are equal
def equals(kv_data, kv_data2): kv_dict1 = loads(kv_data) kv_dict2 = loads(kv_data2) if kv_dict1 == kv_dict2: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dict_keys_duplicate(self):\n assert (\n orjson.dumps({\"1\": True, 1: False}, option=orjson.OPT_NON_STR_KEYS)\n == b'{\"1\":true,\"1\":false}'\n )", "def contains_value(kv_json, value):\n if isinstance(kv_json, str):\n kv_dict = loads(kv_json)\n for k...
[ "0.6808316", "0.6743237", "0.6582611", "0.64539456", "0.6340551", "0.6310905", "0.6183923", "0.6180304", "0.61636674", "0.6138103", "0.61157376", "0.6100204", "0.6092481", "0.6077344", "0.6060001", "0.6018096", "0.5998856", "0.59183186", "0.59001577", "0.58963", "0.58931094",...
0.6284483
6
from all the information provided by the ONCat template, we are only interested by the following infos [name, path and units]. We isolate those into the template_information dictionary
def isolate_relevant_information(self): def get_formula(oncat_formula): """will need to go from something like "${value/10e11}`" to something more pythonic "{value/10e11}""" regular_expression = r'\$(?P<formula>.+)\`' m = re.search(regular_expression, oncat_formula) if m: return m.group('formula') else: return "" template_information = {} for _index, _element in enumerate(self._oncat_default_template): _title = _element["name"] _path = _element["path"] if "units" in _element: _units = _element["units"] else: _units = "" if "transform" in _element: _formula = get_formula(_element["transform"]) else: _formula = "" template_information[_index] = {'title': _title, 'path': _path, 'units': _units, 'formula': _formula} self.template_information = template_information
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_template(self):\n for line in self.raw_template.split(\"\\n\"):\n line = line.strip()\n if line.startswith('#m3'):\n key, val = line[3:].strip().split('=', 1)\n key = key.strip()\n val = val.strip()\n self.variables[...
[ "0.6129451", "0.5694503", "0.5597461", "0.5589702", "0.5520051", "0.5466003", "0.5434218", "0.5390082", "0.5387614", "0.53731686", "0.53682715", "0.53276026", "0.5297058", "0.5292366", "0.52901614", "0.52713674", "0.5267604", "0.5235461", "0.52188444", "0.5214309", "0.5210592...
0.7443054
0
will need to go from something like "${value/10e11}`" to something more pythonic "{value/10e11}
def get_formula(oncat_formula): regular_expression = r'\$(?P<formula>.+)\`' m = re.search(regular_expression, oncat_formula) if m: return m.group('formula') else: return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def usd(value):\n return f\"${value:,.2f}\"", "def usd(value):\n return f\"${value:,.2f}\"", "def _fmt(x, pos):\n a, b = '{:.2e}'.format(x).split('e')\n b = int(b)\n return r'${} \\times 10^{{{}}}$'.format(a, b)", "def usd(value):\n return f\"${int(value):,}\"", "def format_val(val):\n ...
[ "0.6952612", "0.6952612", "0.6528751", "0.6438116", "0.6148548", "0.607353", "0.6007281", "0.6003679", "0.6000689", "0.5998228", "0.5958189", "0.5958189", "0.5906514", "0.58572143", "0.5845689", "0.5830315", "0.5819316", "0.58074373", "0.5799571", "0.57672465", "0.5750976", ...
0.0
-1
Using the ONCat template to create projection used by oncat to return full information
def create_oncat_projection_from_template(with_location=False, template={}): projection = [] if with_location: projection = ['location'] nbr_columns = len(template) for _col in np.arange(nbr_columns): projection.append(template[_col]['path']) return projection
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def projection(self):\n pass", "def describe(self, template='projection_default.txt', engine='default'):\n raise NotImplementedError", "def output_projection(self):\n return self.projection(what='output')", "def makeProjection(self, variable, token, typed_token, constituent_dict):\n m...
[ "0.6503911", "0.61083275", "0.58378226", "0.53846645", "0.52837193", "0.52689284", "0.5229088", "0.52271664", "0.518275", "0.5146785", "0.51318693", "0.51146895", "0.5093305", "0.5028556", "0.50255054", "0.49924508", "0.49750137", "0.4933599", "0.49226946", "0.49170607", "0.4...
0.67057735
0
Applies selected activation function to intermediate output.
def apply_activation(intermediate_output, intermediate_activation): if intermediate_activation is None: return intermediate_output if intermediate_activation == 'gelu': intermediate_output = nn.gelu(intermediate_output) elif intermediate_activation == 'relu': intermediate_output = nn.relu(intermediate_output) elif intermediate_activation == 'sigmoid': intermediate_output = nn.sigmoid(intermediate_output) elif intermediate_activation == 'softmax': intermediate_output = nn.softmax(intermediate_output) elif intermediate_activation == 'celu': intermediate_output = nn.celu(intermediate_output) elif intermediate_activation == 'elu': intermediate_output = nn.elu(intermediate_output) elif intermediate_activation == 'log_sigmoid': intermediate_output = nn.log_sigmoid(intermediate_output) elif intermediate_activation == 'log_softmax': intermediate_output = nn.log_softmax(intermediate_output) elif intermediate_activation == 'soft_sign': intermediate_output = nn.soft_sign(intermediate_output) elif intermediate_activation == 'softplus': intermediate_output = nn.softplus(intermediate_output) elif intermediate_activation == 'swish': intermediate_output = nn.swish(intermediate_output) elif intermediate_activation == 'tanh': intermediate_output = jnp.tanh(intermediate_output) else: raise NotImplementedError('%s activation function is not yet supported.' % intermediate_activation) return intermediate_output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_layer_activation(x):\n return x", "def uf_activate(self, output_reg):\n if len(self.inputs) is 2:\n self.two_activation(output_reg)\n elif len(self.inputs) is 3:\n self.three_activation(output_reg)\n else:\n self.large_activation(output_reg)", ...
[ "0.7604806", "0.7194517", "0.6661993", "0.6523873", "0.64537185", "0.6431659", "0.64278513", "0.6370167", "0.63165534", "0.6285312", "0.6285312", "0.6214801", "0.6194766", "0.6191587", "0.6181813", "0.614133", "0.6128814", "0.6084095", "0.607957", "0.6078586", "0.60711646", ...
0.7784417
0
Get loss and log probs for the masked LM.
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions, label_ids, label_weights): input_tensor = gather_indexes(input_tensor, positions) with tf.variable_scope('cls/predictions'): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with tf.variable_scope('transform'): input_tensor = tf.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = tf.get_variable( 'output_bias', shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_loss_inputs(cls):\n return LOGITS", "def losses(self):\n # compute all kinds of losses \n\n # 1. Logits losses for classification \n\n # 2. regression loss for bbox \n\n return classification_loss, bbox_reg_loss", "def masked_l1_loss(prediction, target, mask):\n ab...
[ "0.6298937", "0.6172792", "0.6055308", "0.5973929", "0.58460975", "0.58436185", "0.58152527", "0.58060163", "0.5798398", "0.5763899", "0.5735554", "0.5732799", "0.57132846", "0.5705625", "0.5683144", "0.56716806", "0.5659647", "0.5657345", "0.5654184", "0.5651408", "0.565015"...
0.0
-1
Get loss and log probs for the next sentence prediction.
def get_next_sentence_output(bert_config, input_tensor, labels): # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. with tf.variable_scope('cls/seq_relationship'): output_weights = tf.get_variable( 'output_weights', shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range)) output_bias = tf.get_variable( 'output_bias', shape=[2], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, X):\n words = X.split()\n chance = []\n for cur_label in self.model[\"labels\"]:\n probability = self.model[\"labels\"][cur_label][\"probability\"]\n total_grade = math.log(probability, math.e)\n for word in words:\n word_dict =...
[ "0.6818383", "0.67636436", "0.6717865", "0.6672301", "0.6578229", "0.6562069", "0.65501714", "0.6530699", "0.6514236", "0.6467297", "0.64552486", "0.6400093", "0.6400093", "0.64000565", "0.6378741", "0.6371581", "0.6352471", "0.6344506", "0.63184255", "0.629813", "0.62947714"...
0.63347507
18
Gathers the vectors at the specific positions over a minibatch.
def gather_indexes(sequence_tensor, positions): sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map_vects(self, input_vects):\n \n if not self._trained:\n raise ValueError(\"SOM not trained yet\")\n \n to_return = []\n \n distances = []\n \n \n contador_adyacentes = 0\n \n matriz = np.array(list(self._neuron_locations(self._m, self...
[ "0.6250158", "0.6235054", "0.5918158", "0.590376", "0.5815052", "0.567955", "0.566976", "0.56412804", "0.55624056", "0.5546037", "0.55153716", "0.55076957", "0.54924893", "0.54811513", "0.5472844", "0.54588175", "0.54577035", "0.5457554", "0.54365367", "0.5420255", "0.5397297...
0.0
-1
Returns TF Bert config..
def get_tf_config(config_path): return modeling.BertConfig.from_json_file(config_path).__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bert_config(config):\n if config.model_size == \"large\":\n args = {\"hidden_size\": 1024, \"num_hidden_layers\": 24}\n elif config.model_size == \"base\":\n args = {\"hidden_size\": 768, \"num_hidden_layers\": 12}\n elif config.model_size == \"small\":\n args = {\"hidden_size\": 256, \"num_hid...
[ "0.66096175", "0.6400916", "0.6374486", "0.63508105", "0.6265168", "0.61243945", "0.6115964", "0.61093426", "0.61093426", "0.60153896", "0.601387", "0.5967145", "0.59554666", "0.59397674", "0.5929531", "0.59017366", "0.5877108", "0.5862486", "0.5856384", "0.585002", "0.584904...
0.7430698
0
Return tf mlperf model parameters in a dictionary format. Use get_tf_model_variables if using kerasBERT checkpoint. This function works
def get_mlperf_model_variables(config_path, init_checkpoint): # Load saved model configuration bert_config = modeling.BertConfig.from_json_file(config_path) seq_length = bert_config.max_position_embeddings tf_variables = {} max_predictions_per_seq = 76 # Generate BERT TF model and initiate variable update from checkpoint graph = tf.Graph() sess = tf.Session(graph=graph) with graph.as_default(): input_ids = tf.zeros((4, seq_length), dtype=tf.int32) input_mask = tf.zeros((4, seq_length), dtype=tf.int32) segment_ids = tf.zeros((4, seq_length), dtype=tf.int32) masked_lm_positions = tf.zeros((4, max_predictions_per_seq), dtype=tf.int32) masked_lm_ids = tf.zeros((4, max_predictions_per_seq), dtype=tf.int32) masked_lm_weights = tf.zeros((4, max_predictions_per_seq), dtype=tf.float32) next_sentence_labels = tf.zeros((4), dtype=tf.int32) tf_model = modeling.BertModel( config=bert_config, is_training=True, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=True) (masked_lm_loss, _, _) = get_masked_lm_output(bert_config, tf_model.get_sequence_output(), tf_model.get_embedding_table(), masked_lm_positions, masked_lm_ids, masked_lm_weights) (next_sentence_loss, _, _) = get_next_sentence_output(bert_config, tf_model.get_pooled_output(), next_sentence_labels) _ = masked_lm_loss + next_sentence_loss tvars = tf.trainable_variables() (assignment_map, _) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map) sess.run(tf.initializers.global_variables()) tvars_vals = sess.run(tvars) for var, val in zip(tvars, tvars_vals): tf_variables[var.name[:-2]] = val tf_config = bert_config.__dict__ return tf_config, tf_variables, tf_model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def describe_model():\n train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n msg = [\"\"]\n total = 0\n for v in train_vars:\n shape = v.get_shape()\n ele = shape.num_elements()\n total += ele\n msg.append(\"{}: shape={}, dim={}\".format(\n v.name, s...
[ "0.65184647", "0.65112454", "0.638817", "0.63505626", "0.6223993", "0.6148675", "0.6122482", "0.6062248", "0.6055513", "0.6027013", "0.600799", "0.59653383", "0.59652144", "0.5957283", "0.59522676", "0.5892172", "0.58268005", "0.5810189", "0.58081865", "0.57768404", "0.577381...
0.7301645
0
Return tf model parameters in a dictionary format.
def get_tf_model_variables(config_path, init_checkpoint): # Load saved model configuration config = configs.BertConfig.from_json_file(config_path) # Generate BERT TF model and initiate variable update from checkpoint seq_len = 20 _, tf_model = bert_models.squad_model(config, seq_len) checkpoint = tf.train.Checkpoint(model=tf_model) checkpoint.restore(init_checkpoint).assert_existing_objects_matched() tf_config = config.__dict__ tf_variables = {v.name: v.numpy() for v in tf_model.variables} return tf_config, tf_variables, tf_model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_model(self):\n\n parameters = {keys._topology:self.topology,\n keys._size:self.size,\n keys._name:self.name,\n #keys._output_activation:self._outActiv_fun_key,\n #keys._hidden_activation:self._hiddenActiv_fun_key,\n...
[ "0.73577815", "0.7218848", "0.7189309", "0.7117365", "0.71154326", "0.7032748", "0.70010006", "0.6994405", "0.69852734", "0.69549537", "0.6953729", "0.6948936", "0.69394284", "0.6904722", "0.6866829", "0.6861575", "0.6858673", "0.6855758", "0.6850728", "0.68482053", "0.681446...
0.0
-1
Convert TF BERT model config to be compatible with JAX BERT model.
def convert_tf_config_to_jax_bert(config): unnecessary_keys = ['initializer_range', 'backward_compatible', 'embedding_size'] for key in unnecessary_keys: if key in config: config.pop(key) # change TF parameter names to match JAX parameter names mapping = { 'attention_dropout_rate': 'attention_probs_dropout_prob', 'hidden_activation': 'hidden_act', 'dropout_rate': 'hidden_dropout_prob', 'emb_dim': 'hidden_size', 'mlp_dim': 'intermediate_size', 'max_len': 'max_position_embeddings', 'num_heads': 'num_attention_heads', 'num_layers': 'num_hidden_layers' } for jax_key, tf_key in mapping.items(): config[jax_key] = config.pop(tf_key) return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bert_config(config):\n if config.model_size == \"large\":\n args = {\"hidden_size\": 1024, \"num_hidden_layers\": 24}\n elif config.model_size == \"base\":\n args = {\"hidden_size\": 768, \"num_hidden_layers\": 12}\n elif config.model_size == \"small\":\n args = {\"hidden_size\": 256, \"num_hid...
[ "0.61332077", "0.6126715", "0.5987451", "0.5933029", "0.57321113", "0.57210857", "0.57176137", "0.568357", "0.56809187", "0.56193393", "0.55681133", "0.5533706", "0.5505984", "0.54603016", "0.54229367", "0.5420582", "0.5410694", "0.54047483", "0.53842753", "0.535301", "0.5317...
0.7478179
0
Modify TF mlperf model parameter dict to be compatible with JAX parameter dict. Convert parameter names in tf_params to match JAX parameter names and create a nested dictionary of parameters for each layer in the model using `/` in each key as a delimeter. This function uses mlperf model naming convention. Use convert_tf_param_dict_to_jax when using kerasBERT model configuration.
def convert_mlperf_param_dict_to_jax(tf_params, emb_dim, num_heads): jax_params = {} # mapping between mlperf model and JAX model # works for model in //third_party/tensorflow_models/mlperf/models/rough/bert tf_key_to_jax_key = [ ('cls/seq_relationship/', 'classification/predictions_transform_logits/'), ('output_weights', 'kernel'), ('transform_logits/output_bias', 'transform_logits/bias'), ('cls/predictions/', 'masked_lm/cls_predictions_'), ('transform/dense', 'transform_dense'), ('transform/LayerNorm', 'transform_layernorm'), ('predictions_output_bias', 'predictions_output_bias/bias'), ('bert/embeddings/word_embeddings', 'word_embeddings/embedding'), ('bert/', 'transformer_encoder/'), ('embeddings/token_type_embeddings', 'type_embeddings/embedding'), ('embeddings/position_embeddings', 'position_embeddings/embedding'), ('attention/self', 'self_attention'), ('attention/output', 'self_attention_output'), ('layer_norm/layer_norm_', 'layer_norm/'), ('output/LayerNorm', 'output_layer_norm'), ('intermediate/dense', 'intermediate'), ('output/dense', 'output'), ('pooler/dense/', 'pooler_transform/'), ('self_attention_output_layer_norm', 'self_attention_layer_norm'), ('embeddings/LayerNorm', 'embeddings_layer_norm'), ('encoder/layer', 'encoder_layer'), (':0', ''), ('beta', 'bias'), ('gamma', 'scale') ] for tf_key, val in tf_params.items(): jax_key = tf_key for tf_name, jax_name in tf_key_to_jax_key: jax_key = jax_key.replace(tf_name, jax_name) # Reshape kernels if necessary jax_params[jax_key] = tf_params[tf_key] if 'self_attention_output/kernel' in jax_key: param = tf_params[tf_key] jax_params[jax_key] = param.reshape( (num_heads, -1, emb_dim)) # jax position embedding kernel has additional dimension pos_embedding = jax_params[ 'transformer_encoder/position_embeddings/embedding'] jax_params[ 'transformer_encoder/position_embeddings/embedding'] = pos_embedding[ np.newaxis, ...] 
# convert flat param dict into nested dict using `/` as delimeter outer_dict = {} for key, val in jax_params.items(): tokens = key.split('/') inner_dict = outer_dict # each token except the very last should add a layer to the nested dict for token in tokens[:-1]: if token not in inner_dict: inner_dict[token] = {} inner_dict = inner_dict[token] inner_dict[tokens[-1]] = val return outer_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_tf_param_dict_to_jax(tf_params):\n jax_params = {}\n tf_key_to_jax_key = [\n ('embeddings/layer_norm', 'embeddings_layer_norm'),\n ('transformer/layer', 'encoder_layer'), ('embeddings:0', 'embedding'),\n (':0', ''), ('beta', 'bias'), ('gamma', 'scale'),\n ('position_embedding/', '...
[ "0.7303152", "0.59408104", "0.544858", "0.5378973", "0.531307", "0.5166872", "0.50446033", "0.5018701", "0.49990663", "0.4953636", "0.49412417", "0.49221116", "0.49118844", "0.49070784", "0.49015772", "0.48552454", "0.48360878", "0.48341933", "0.48306793", "0.48175794", "0.48...
0.80498666
0
Modify TF parameter dict to be compatible with JAX parameter dict. Convert parameter names in tf_params to match JAX parameter names and create a nested dictionary of parameters for each layer in the model using `/` in each key as a delimeter.
def convert_tf_param_dict_to_jax(tf_params): jax_params = {} tf_key_to_jax_key = [ ('embeddings/layer_norm', 'embeddings_layer_norm'), ('transformer/layer', 'encoder_layer'), ('embeddings:0', 'embedding'), (':0', ''), ('beta', 'bias'), ('gamma', 'scale'), ('position_embedding/', 'position_embeddings/') ] for tf_key in tf_params: jax_key = tf_key for tf_name, jax_name in tf_key_to_jax_key: jax_key = jax_key.replace(tf_name, jax_name) jax_params[jax_key] = tf_params[tf_key] # jax position embedding kernel has additional dimension pos_embedding = jax_params['position_embeddings/embedding'] jax_params['position_embeddings/embedding'] = pos_embedding[np.newaxis, ...] # convert flat param dict into nested dict using `/` as delimeter outer_dict = {} for key, val in jax_params.items(): tokens = key.split('/') inner_dict = outer_dict # each token except the very last should add a layer to the nested dict for token in tokens[:-1]: if token not in inner_dict: inner_dict[token] = {} inner_dict = inner_dict[token] inner_dict[tokens[-1]] = val # this layer doesn't have parameters, but key is required to be present outer_dict['self_attention_mask'] = 0. return outer_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_mlperf_param_dict_to_jax(tf_params, emb_dim, num_heads):\n jax_params = {}\n # mapping between mlperf model and JAX model\n # works for model in //third_party/tensorflow_models/mlperf/models/rough/bert\n tf_key_to_jax_key = [\n ('cls/seq_relationship/', 'classification/predictions_transform_lo...
[ "0.6692091", "0.574194", "0.57411915", "0.55651945", "0.5472306", "0.54638326", "0.53691083", "0.53339547", "0.5273296", "0.5266139", "0.5258614", "0.5244856", "0.52444565", "0.5239768", "0.5239768", "0.52044886", "0.5198123", "0.5191683", "0.51802343", "0.51680446", "0.51627...
0.7722715
0
>>> get_comparison_value('AS KS QS JS TS'.split())[1] 'Royal Flush' >>> get_comparison_value('KS QS JS TS 9S'.split())[1] 'Straight Flush' >>> get_comparison_value('8S 8C 8D 8H 3S'.split())[1] 'Four of a Kind' >>> get_comparison_value('8S 8C 8D 3H 3S'.split())[1] 'Full House' >>> get_comparison_value('2S QS JS TS 9S'.split())[1] 'Flush' >>> get_comparison_value('KD QS JS TS 9S'.split())[1] 'Straight' >>> get_comparison_value('8S 8C 8D 4H 3S'.split())[1] 'Three of a Kind' >>> get_comparison_value('8S 8C 4D 4H 3S'.split())[1] 'Two Pairs' >>> get_comparison_value('8S 8C 5D 4H 3S'.split())[1] 'One Pair' >>> get_comparison_value('8S 7C 5D 4H 3S'.split())[1] 'High Card'
def get_comparison_value(hand): suits = set(get_suit(card) for card in hand) values = set(get_value(card) for card in hand) is_flush = len(suits) == 1 is_straight = (len(values) == 5 and min(values) + 4 == max(values)) kinds = get_kinds(hand) kind_counts = [k.count for k in kinds] if is_flush and values == {10, 11, 12, 13, 14}: result = (100, 'Royal Flush') elif is_flush and is_straight: result = (90, 'Straight Flush') elif kind_counts == [4, 1]: result = (80, 'Four of a Kind') elif kind_counts == [3, 2]: result = (70, 'Full House') elif is_flush: result = (60, 'Flush') elif is_straight: result = (50, 'Straight') elif kind_counts == [3, 1, 1]: result = (40, 'Three of a Kind') elif kind_counts == [2, 2, 1]: result = (30, 'Two Pairs') elif kind_counts == [2, 1, 1, 1]: result = (20, 'One Pair', kinds[0].value) else: assert kind_counts == [1]*5 result = (10, 'High Card') return result + (max(values),)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comparison(self) -> str:\n return self._values.get('comparison')", "def comparison(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"comparison\")", "def testWinkler(self): # - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n for pair in self.string_pairs:\n\n a...
[ "0.578843", "0.5683622", "0.5322386", "0.5317316", "0.5244176", "0.5244176", "0.52301955", "0.51247126", "0.5097643", "0.50959706", "0.5091257", "0.5083387", "0.50707597", "0.5040682", "0.5037145", "0.5023796", "0.5014674", "0.50132555", "0.50123906", "0.50123906", "0.4993311...
0.6191718
0
Compute the transformation matrix from Galactic spherical to Magellanic Stream coordinates.
def galactic_to_MS(): return MS_MATRIX
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MS_to_galactic():\n return matrix_transpose(MS_MATRIX)", "def get_transformation_matrix(theta=45):\n\n theta = theta/360 * 2 * np.pi # in radians\n hx = np.cos(theta)\n sy = np.sin(theta)\n\n S = np.array([[1, hx, 0],\n [0, sy, 0],\n [0, 0, 1]])\n #S_inv = ...
[ "0.6793291", "0.6397168", "0.6347205", "0.6225888", "0.6009966", "0.5733559", "0.5673262", "0.56630766", "0.56511235", "0.5618374", "0.5604634", "0.5580951", "0.5580951", "0.5563582", "0.55473304", "0.5542603", "0.55239236", "0.547165", "0.546832", "0.5466667", "0.54391974", ...
0.6850105
0
Compute the transformation matrix from Magellanic Stream coordinates to spherical Galactic.
def MS_to_galactic(): return matrix_transpose(MS_MATRIX)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def galactic_to_MS():\n return MS_MATRIX", "def get_transformation_matrix(theta=45):\n\n theta = theta/360 * 2 * np.pi # in radians\n hx = np.cos(theta)\n sy = np.sin(theta)\n\n S = np.array([[1, hx, 0],\n [0, sy, 0],\n [0, 0, 1]])\n #S_inv = np.linalg.inv(S)\n...
[ "0.6758652", "0.6351473", "0.6289319", "0.6249192", "0.5900926", "0.58043545", "0.5739055", "0.55864763", "0.558487", "0.55819327", "0.55792", "0.55535346", "0.5547185", "0.5533851", "0.55322266", "0.55322266", "0.55189437", "0.551152", "0.5510809", "0.54767853", "0.5444235",...
0.67547804
1
Add the ``request.raven`` method and configure the `ravenjs` panel.
def includeme(config, get_raven=None, panel=None): # Compose. if get_raven is None: #pragma: no cover get_raven = get_raven_client if panel is None: #pragma: no cover panel = raven_js_panel # Unpack. settings = config.registry.settings # Provide the client as ``request.raven``. config.add_request_method(get_raven, 'raven', reify=True) # Configure the ``raven-js`` panel. if hasattr(config, "add_panel"): # Soft detect if we have pyramid_layout installed default_tmpl = 'pyramid_raven:templates/panel.mako' panel_tmpl = settings.get('pyramid_raven.panel_tmpl', default_tmpl) config.add_panel(panel, 'raven-js', renderer=panel_tmpl)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def includeme(config):\n config.add_subscriber(add_renderer_globals, BeforeRender)\n config.add_subscriber(add_localizer, NewRequest)\n config.add_subscriber(add_csrf_validation, NewRequest)\n config.add_subscriber(add_resources, NewRequest)", "def enable(self):\n LOGGER.info('Enabling WebAPI ...
[ "0.59271264", "0.52645963", "0.5064856", "0.5035225", "0.5023455", "0.50157934", "0.5003367", "0.49658138", "0.49607152", "0.49281287", "0.48860234", "0.4869646", "0.48251504", "0.48205826", "0.48137787", "0.47873187", "0.47489318", "0.46791732", "0.46751204", "0.46577823", "...
0.7125961
0
This is the method to be inherited for adding policies
def _get_policies(self, cr, uid, context=None): return [('optional', _('Optional')), ('always', _('Always')), ('never', _('Never'))]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_policies(self, policy_collection, options):", "def add_policy(self, policy_name, policy_text): \n self.policies.add(policy_name, policy_text)\n self.policies = set()", "def update_policy(self):\n pass", "def policies(self, policies):\n\n self._policies = policies", ...
[ "0.72758013", "0.72136664", "0.7203031", "0.71865726", "0.704924", "0.7013781", "0.6969776", "0.67410517", "0.66044307", "0.65886885", "0.64111924", "0.64080495", "0.6374745", "0.6372226", "0.6354436", "0.63291216", "0.6327667", "0.6274823", "0.6273386", "0.62506497", "0.6214...
0.6071868
23
Extension point to obtain analytic policy for an account
def _get_asset_category_policy(self, cr, uid, account, context=None): return account.user_type.asset_policy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_policy(self, *args, **kwargs):\r\n pass", "def get_policy(usage_id):\r\n return policy.get(policy_key(usage_id), {})", "def get_account_settings():\n pass", "def policy(agent):", "def get_policies():\r\n policy = policies.values()\r\n return policy", "def PolicyStat...
[ "0.62874985", "0.592912", "0.592754", "0.5838627", "0.5768842", "0.5702864", "0.56812865", "0.5668994", "0.56547016", "0.5649048", "0.5649048", "0.5625525", "0.55135316", "0.5442621", "0.5390839", "0.53589445", "0.5355903", "0.5345013", "0.5333619", "0.5252388", "0.5235518", ...
0.47461304
80
Preprocess the inputs to a list.
def _inputs_to_list(self, inputs: InputsType) -> list: if isinstance(inputs, str): backend = get_file_backend(inputs) if hasattr(backend, 'isdir') and isdir(inputs): # Backends like HttpsBackend do not implement `isdir`, so only # those backends that implement `isdir` could accept the inputs # as a directory filename_list = list_dir_or_file(inputs, list_dir=False) inputs = [ join_path(inputs, filename) for filename in filename_list ] if not isinstance(inputs, (list, tuple)): inputs = [inputs] return list(inputs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess(self, inputs, is_list_of_str=False):\n return self.vocab.transform(inputs, is_list_of_str)", "def controlled_list(input_list):\n output_list = input_list\n\n if not isinstance(input_list, list):\n\n dummy_list = []\n dummy_list.append(input_list)\n output_list = d...
[ "0.7020911", "0.6580381", "0.63203806", "0.62907684", "0.62907684", "0.62907684", "0.6103207", "0.6083461", "0.6045474", "0.6014713", "0.60038537", "0.59911287", "0.59911287", "0.59911287", "0.5957472", "0.5921157", "0.59046984", "0.58629376", "0.5854231", "0.5832996", "0.582...
0.60973823
7
Process the inputs into a modelfeedable format. Customize your preprocess by overriding this method. Preprocess should return an iterable object, of which each item will be used as the input of ``model.test_step``. ``BaseInferencer.preprocess`` will return an iterable chunked data,
def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs): chunked_data = self._get_chunk_data( map(self.pipeline, inputs), batch_size) yield from map(self.collate_fn, chunked_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess_data(self):\n\n self._preprocess_train_data()\n self._preprocess_test_data()", "def handle(self, data, context):\n \n model_input = self.preprocess(data)\n model_out = self.inference(model_input)\n return self.postprocess(model_out)", "def inference(self...
[ "0.6673395", "0.6537345", "0.6424362", "0.63573146", "0.6273335", "0.6219987", "0.6157909", "0.6074433", "0.60611945", "0.60131466", "0.5940389", "0.59368104", "0.59285253", "0.59147906", "0.591146", "0.5900656", "0.5848106", "0.5789562", "0.5781073", "0.5774665", "0.57714385...
0.64281446
2
Feed the inputs to the model.
def forward(self, inputs: Union[dict, tuple], **kwargs) -> Any: return self.model.test_step(inputs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feed_inputs(self):\n feed_dict = collections.defaultdict(list)\n for i in range(self._pipe.batch_size):\n data = self.example_to_data(self._buffer.get())\n for k, v in data.items():\n feed_dict[k].append(v)\n for k, v in self.features.items():\n ...
[ "0.72261435", "0.71358067", "0.7098541", "0.6972215", "0.694485", "0.67344826", "0.67055213", "0.6651103", "0.6630101", "0.6612315", "0.6608851", "0.6608851", "0.6608851", "0.6608851", "0.65922177", "0.65685606", "0.651799", "0.6496179", "0.6496179", "0.6453505", "0.6434099",...
0.6084333
39
Visualize predictions. Customize your visualization by overriding this method. visualize should return visualization results, which could be np.ndarray or any other objects.
def visualize(self, inputs: list, preds: Any, show: bool = False, **kwargs) -> List[np.ndarray]:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_predictions(self):\n self.vis.draw_predictions()", "def visualizePredictions(testData,knn_predictions):\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n testData.dataDict[testData.reference] = knn_predictions\r\n testData.visualize.scatterPlot('Petal length','Petal wi...
[ "0.8056009", "0.70886505", "0.70653015", "0.6991291", "0.69617945", "0.692556", "0.68410033", "0.68099743", "0.67139244", "0.6713704", "0.6668432", "0.6661632", "0.66547585", "0.6653662", "0.66434425", "0.66227466", "0.6610092", "0.65519696", "0.6549795", "0.6493545", "0.6469...
0.6486966
20
Process the predictions and visualization results from ``forward`` and ``visualize``.
def postprocess( self, preds: Any, visualization: List[np.ndarray], return_datasample=False, **kwargs, ) -> dict:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def walk_forward_prediction(self):\r\n for output_name in self.output_names:\r\n print('\\t\\t\\t|--Prediction type: {}'.format(output_name))\r\n predictions_by_model = {}\r\n pred_metadata_by_model = {}\r\n \r\n print('\\t\\t\\t\\t|--SVM Model')\r\n ...
[ "0.67429686", "0.6646426", "0.6335832", "0.6304872", "0.6192201", "0.61361736", "0.6050557", "0.60094845", "0.59930474", "0.59825176", "0.59754235", "0.5935713", "0.59297657", "0.59130555", "0.5906629", "0.5894044", "0.5871319", "0.5870498", "0.58653355", "0.5862726", "0.5836...
0.55523545
70
Load config and weights from metafile.
def _load_model_from_metafile(self, model: str) -> Tuple[Config, str]: model = model.lower() assert self.scope is not None, ( 'scope should be initialized if you want ' 'to load config from metafile.') assert self.scope in MODULE2PACKAGE, ( f'{self.scope} not in {MODULE2PACKAGE}!,' 'please pass a valid scope.') repo_or_mim_dir = BaseInferencer._get_repo_or_mim_dir(self.scope) for model_cfg in BaseInferencer._get_models_from_metafile( repo_or_mim_dir): model_name = model_cfg['Name'].lower() model_aliases = model_cfg.get('Alias', []) if isinstance(model_aliases, str): model_aliases = [model_aliases.lower()] else: model_aliases = [alias.lower() for alias in model_aliases] if (model_name == model or model in model_aliases): cfg = Config.fromfile( osp.join(repo_or_mim_dir, model_cfg['Config'])) weights = model_cfg['Weights'] weights = weights[0] if isinstance(weights, list) else weights return cfg, weights raise ValueError(f'Cannot find model: {model} in {self.scope}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_meta(self, meta_file):\n self._w2i = pickle.load(open(meta_file, \"rb\"))", "def _load_from_file(self):\n try:\n self.logger.debug('Load metafile %s.', self.meta_file_path)\n with codecs.open(self.meta_file_path, 'r', 'utf-8') as meta_file:\n self._meta...
[ "0.66893715", "0.611689", "0.61168337", "0.59983677", "0.59396636", "0.59336466", "0.5892341", "0.58317614", "0.5799049", "0.5787442", "0.578168", "0.5773452", "0.5759801", "0.57340556", "0.5724864", "0.5721602", "0.5648196", "0.5646341", "0.56409925", "0.56261986", "0.562360...
0.5922421
6
Get the directory where the ``Configs`` located when the package is installed or ``PYTHONPATH`` is set.
def _get_repo_or_mim_dir(scope): try: module = importlib.import_module(scope) except ImportError: if scope not in MODULE2PACKAGE: raise KeyError( f'{scope} is not a valid scope. The available scopes ' f'are {MODULE2PACKAGE.keys()}') else: project = MODULE2PACKAGE[scope] raise ImportError( f'Cannot import {scope} correctly, please try to install ' f'the {project} by "pip install {project}"') # Since none of OpenMMLab series packages are namespace packages # (https://docs.python.org/3/glossary.html#term-namespace-package), # The first element of module.__path__ means package installation path. package_path = module.__path__[0] if osp.exists(osp.join(osp.dirname(package_path), 'configs')): repo_dir = osp.dirname(package_path) return repo_dir else: mim_dir = osp.join(package_path, '.mim') if not osp.exists(osp.join(mim_dir, 'configs')): raise FileNotFoundError( f'Cannot find `configs` directory in {package_path}!, ' f'please check the completeness of the {scope}.') return mim_dir
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config_dir():\n return Path(environ.get(CONFIG_DIR_ENV_VAR, _default_dir))", "def get_package_dir():\n return Path(__file__).parent", "def GetPackageDirectory():\n return os.path.dirname(__file__)", "def configPath(self):\n return os.path.dirname(__file__)", "def getConfigPath():\n\...
[ "0.75922424", "0.74677104", "0.739525", "0.73235977", "0.7267069", "0.7208713", "0.7202483", "0.71300155", "0.7091457", "0.70678365", "0.70390385", "0.70278746", "0.7002419", "0.6939253", "0.69215524", "0.69165456", "0.6855893", "0.6833579", "0.68331736", "0.6795784", "0.6786...
0.60847396
84
Initialize the model with the given config and checkpoint on the specific device.
def _init_model( self, cfg: ConfigType, weights: Optional[str], device: str = 'cpu', ) -> nn.Module: checkpoint: Optional[dict] = None if weights is not None: checkpoint = _load_checkpoint(weights, map_location='cpu') if not cfg: assert checkpoint is not None try: # Prefer to get config from `message_hub` since `message_hub` # is a more stable module to store all runtime information. # However, the early version of MMEngine will not save config # in `message_hub`, so we will try to load config from `meta`. cfg_string = checkpoint['message_hub']['runtime_info']['cfg'] except KeyError: assert 'meta' in checkpoint, ( 'If model(config) is not provided, the checkpoint must' 'contain the config string in `meta` or `message_hub`, ' 'but both `meta` and `message_hub` are not found in the ' 'checkpoint.') meta = checkpoint['meta'] if 'cfg' in meta: cfg_string = meta['cfg'] else: raise ValueError( 'Cannot find the config in the checkpoint.') cfg.update( Config.fromstring(cfg_string, file_format='.py')._cfg_dict) # Delete the `pretrained` field to prevent model from loading the # the pretrained weights unnecessarily. if cfg.model.get('pretrained') is not None: del cfg.model.pretrained model = MODELS.build(cfg.model) model.cfg = cfg self._load_weights_to_model(model, checkpoint, cfg) model.to(device) model.eval() return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_model(config: Union[str, Path, Config],\n checkpoint: Optional[str] = None,\n device: str = 'cuda:0',\n cfg_options: Optional[dict] = None):\n if isinstance(config, (str, Path)):\n config = Config.fromfile(config)\n elif not isinstance(config, Config)...
[ "0.77449846", "0.7433742", "0.73220265", "0.72701585", "0.7264649", "0.6990384", "0.6840998", "0.6713714", "0.65702015", "0.65702015", "0.6462767", "0.64526856", "0.6444339", "0.6407412", "0.63909423", "0.6390203", "0.63637805", "0.6350734", "0.62798935", "0.62494826", "0.623...
0.70993656
5
Loading model weights and meta information from cfg and checkpoint. Subclasses could override this method to load extra meta information from ``checkpoint`` and ``cfg`` to model.
def _load_weights_to_model(self, model: nn.Module, checkpoint: Optional[dict], cfg: Optional[ConfigType]) -> None: if checkpoint is not None: _load_checkpoint_to_model(model, checkpoint) else: warnings.warn('Checkpoint is not loaded, and the inference ' 'result is calculated by the randomly initialized ' 'model!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model(self, ckpt_name=\"best_model.pth\"):\n path = \"/\".join(ckpt_name.split(\"/\")[:-1])\n chkpt = torch.load(ckpt_name)\n self.start_epoch = chkpt['epoch']\n self.best_metric = chkpt['best_metric']\n\n # fix the DataParallel caused problem with keys names\n if...
[ "0.6934471", "0.69131255", "0.69076866", "0.67372805", "0.6686757", "0.6633907", "0.66300243", "0.6628675", "0.6621558", "0.65934366", "0.6577404", "0.6577404", "0.6514096", "0.6467032", "0.64553356", "0.64483273", "0.64396846", "0.6426327", "0.6404203", "0.6404203", "0.63859...
0.7453869
0
Initialize the ``collate_fn`` with the given config. The returned ``collate_fn`` will be used to collate the batch data.
def _init_collate(self, cfg: ConfigType) -> Callable: try: with FUNCTIONS.switch_scope_and_registry(self.scope) as registry: collate_fn = registry.get(cfg.test_dataloader.collate_fn) except AttributeError: collate_fn = pseudo_collate return collate_fn # type: ignore
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collate_fn(self, *args):\n return TupleMiniBatch(default_collate(*args))", "def build_collate_fn(\n cls, args: argparse.Namespace, train: bool\n ) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:\n raise NotImplementedError", "def collate_fn(batch):\n\n fl...
[ "0.6244159", "0.599894", "0.5865807", "0.5771731", "0.56962913", "0.5471204", "0.5424939", "0.5283143", "0.52531415", "0.52518207", "0.5231545", "0.51880026", "0.5077874", "0.5077874", "0.49971217", "0.49632436", "0.49623293", "0.4961835", "0.4936827", "0.49166656", "0.489373...
0.7657155
0
Initialize the test pipeline. Return a pipeline to handle various input data, such as ``str``, ``np.ndarray``. It is an abstract method in BaseInferencer, and should be implemented in subclasses. The returned pipeline will be used to process a single data.
def _init_pipeline(self, cfg: ConfigType) -> Callable:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, pipeline, **kwargs):\n self.pipeline = pipeline\n\n self.batch_size = pipeline.batch_size\n self.X_train = None\n self.y_train = None\n self.X_valid = None\n self.y_valid = None\n\n # Child classes should have init signature:\n # (self, bat...
[ "0.6728112", "0.66963494", "0.64707565", "0.63527936", "0.63097256", "0.62483305", "0.62239105", "0.62218165", "0.62119925", "0.61996466", "0.61975336", "0.61403424", "0.61403066", "0.60585195", "0.60382646", "0.60304", "0.6011767", "0.60076314", "0.6005523", "0.5986289", "0....
0.59325844
21
Get batch data from dataset.
def _get_chunk_data(self, inputs: Iterable, chunk_size: int): inputs_iter = iter(inputs) while True: try: chunk_data = [] for _ in range(chunk_size): processed_data = next(inputs_iter) chunk_data.append(processed_data) yield chunk_data except StopIteration: if chunk_data: yield chunk_data break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_batch_data(batch, ctx):\n data, label = batch\n return (mx.gluon.utils.split_and_load(data, ctx),\n mx.gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def get_batch(self, idxs):\r\n return self.data[(self.start + idxs) % self.maxlen]", "def _get_batch_dat...
[ "0.7814823", "0.73819196", "0.7299499", "0.71846867", "0.71441984", "0.7113388", "0.7087958", "0.704405", "0.6958586", "0.6905176", "0.6773217", "0.67312944", "0.6721615", "0.67039114", "0.6651075", "0.66494566", "0.66444975", "0.66299015", "0.6618811", "0.66071945", "0.65729...
0.0
-1
Dispatch kwargs to preprocess(), forward(), visualize() and postprocess() according to the actual demands.
def _dispatch_kwargs(self, **kwargs) -> Tuple[Dict, Dict, Dict, Dict]: # Ensure each argument only matches one function method_kwargs = self.preprocess_kwargs | self.forward_kwargs | \ self.visualize_kwargs | self.postprocess_kwargs union_kwargs = method_kwargs | set(kwargs.keys()) if union_kwargs != method_kwargs: unknown_kwargs = union_kwargs - method_kwargs raise ValueError( f'unknown argument {unknown_kwargs} for `preprocess`, ' '`forward`, `visualize` and `postprocess`') preprocess_kwargs = {} forward_kwargs = {} visualize_kwargs = {} postprocess_kwargs = {} for key, value in kwargs.items(): if key in self.preprocess_kwargs: preprocess_kwargs[key] = value elif key in self.forward_kwargs: forward_kwargs[key] = value elif key in self.visualize_kwargs: visualize_kwargs[key] = value else: postprocess_kwargs[key] = value return ( preprocess_kwargs, forward_kwargs, visualize_kwargs, postprocess_kwargs, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def preprocess(self, real, **kwargs):\n self._preprocessed = True", "def p...
[ "0.6335919", "0.6335919", "0.6335919", "0.6335919", "0.6038999", "0.5977718", "0.5867384", "0.5712548", "0.56914", "0.5670015", "0.5632651", "0.5632651", "0.5632651", "0.5632651", "0.56184596", "0.5548038", "0.55419624", "0.55088145", "0.5503097", "0.5503062", "0.54987526", ...
0.5567695
15
Load model config defined in metafile from package path.
def _get_models_from_metafile(dir: str): meta_indexes = load(osp.join(dir, 'model-index.yml')) for meta_path in meta_indexes['Import']: # meta_path example: mmcls/.mim/configs/conformer/metafile.yml meta_path = osp.join(dir, meta_path) metainfo = load(meta_path) yield from metainfo['Models']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_model_from_metafile(self, model: str) -> Tuple[Config, str]:\n model = model.lower()\n\n assert self.scope is not None, (\n 'scope should be initialized if you want '\n 'to load config from metafile.')\n assert self.scope in MODULE2PACKAGE, (\n f'{sel...
[ "0.6878292", "0.6540713", "0.64107656", "0.6372208", "0.63531864", "0.625004", "0.6243981", "0.6207827", "0.6201223", "0.6128199", "0.6121089", "0.6106509", "0.6062811", "0.5985855", "0.5977003", "0.5932948", "0.59321964", "0.58767205", "0.58388513", "0.5798539", "0.5768722",...
0.5608092
35
List models defined in metafile of corresponding packages.
def list_models(scope: Optional[str] = None, patterns: str = r'.*'): matched_models = [] if scope is None: default_scope = DefaultScope.get_current_instance() assert default_scope is not None, ( 'scope should be initialized if you want ' 'to load config from metafile.') assert scope in MODULE2PACKAGE, ( f'{scope} not in {MODULE2PACKAGE}!, please make pass a valid ' 'scope.') root_or_mim_dir = BaseInferencer._get_repo_or_mim_dir(scope) for model_cfg in BaseInferencer._get_models_from_metafile( root_or_mim_dir): model_name = [model_cfg['Name']] model_name.extend(model_cfg.get('Alias', [])) for name in model_name: if re.match(patterns, name) is not None: matched_models.append(name) output_str = '' for name in matched_models: output_str += f'model_name: {name}\n' print_log(output_str, logger='current') return matched_models
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_models_from_metafile(dir: str):\n meta_indexes = load(osp.join(dir, 'model-index.yml'))\n for meta_path in meta_indexes['Import']:\n # meta_path example: mmcls/.mim/configs/conformer/metafile.yml\n meta_path = osp.join(dir, meta_path)\n metainfo = load(meta_p...
[ "0.72860724", "0.6853126", "0.6804148", "0.6785551", "0.6757944", "0.66406256", "0.6595081", "0.65213096", "0.6515239", "0.64433444", "0.6440567", "0.6412613", "0.6402561", "0.63477504", "0.63247734", "0.63229364", "0.6300605", "0.6296517", "0.6291255", "0.6263", "0.62552387"...
0.70225334
1
By this save method we create user and make a relationship with contact model and secoud depend on conatc type.
def save(self,commit=True): instance = super(ClientSignupForm, self).save(commit=False) if commit: instance.username = self.cleaned_data['email'] instance.is_active = False instance.save() contact = Client.objects.create(first_name=self.cleaned_data['first_name'],last_name=self.cleaned_data['last_name'], user_role=self.cleaned_data['user_role'], department=self.cleaned_data['department'], user=instance) return instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model(self, request, obj, form, change):\n if not change:\n if form.is_valid():\n user = form.save()\n user.identity = Users.SUPERVISOR\n user.set_password(form.data.get('password'))\n user.iCode = InviteCls.encode_invite_code(u...
[ "0.64130485", "0.6218375", "0.61476517", "0.60348797", "0.6014504", "0.5965818", "0.59625626", "0.5833716", "0.5819595", "0.5818768", "0.5805604", "0.5791148", "0.5760427", "0.575658", "0.5729577", "0.56765777", "0.5676002", "0.5647934", "0.56215864", "0.56215864", "0.5614676...
0.64635587
0
Must preserve data used at construction. Specifically for default averaging/length adjustments. averaging/length adjustments recalculate the underlying data
def _original_data(self, data: np.ndarray): if self._raw_data is None: self._raw_data = data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_avg(self):\n if self._data_type == 'coords':\n # default averaging is supported only for 'matrix' dataTypes\n return\n elif self._data_type == 'image':\n\n x, y = self._averaging, self._averaging\n\n if (x,y) == (1, 1):\n self.vec...
[ "0.60771775", "0.5992512", "0.5975735", "0.5974339", "0.58970016", "0.5867703", "0.58252496", "0.58230865", "0.58149874", "0.57894266", "0.5773829", "0.5726125", "0.5725895", "0.570828", "0.56946415", "0.56483364", "0.56139916", "0.56139916", "0.5607044", "0.5600587", "0.5596...
0.60240406
1
Check on input data for proper shape and dtype
def _convert_to_vector_type(self, vectors): if vectors.shape[-1] == 4 and vectors.ndim == 2: coord_list = self._convert_coords_to_coordinates(vectors) self._data_type = self._data_types[1] elif vectors.shape[-1] == 2 and vectors.ndim == 3: coord_list = self._convert_image_to_coordinates(vectors) self._data_type = self._data_types[0] else: raise TypeError( "Vector data of shape %s is not supported" % str(vectors.shape)) return coord_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def check_dims(self, data):\n if np.ndim(data) != 2:\n raise ValueError('Input data must be a two dimensional numpy array. '\n 'Data received has shape (%g, %g).' % data.shape)", "def check_data_shape(self, dat...
[ "0.7713703", "0.76080585", "0.74882436", "0.7184162", "0.71101797", "0.70855606", "0.7055508", "0.7006338", "0.6966901", "0.6945867", "0.6939449", "0.69268495", "0.69028056", "0.68751085", "0.6828708", "0.6807963", "0.67849463", "0.6784447", "0.6716908", "0.67096233", "0.6709...
0.0
-1
To convert an imagelike array with elements (yproj, xproj) into a position list of coordinates Every pixel position (n, m) results in two output coordinates of (N,2)
def _convert_image_to_coordinates(self, vect) -> np.ndarray: xdim = vect.shape[0] ydim = vect.shape[1] # stride is used during averaging and length adjustment stride_x, stride_y = self._averaging, self._averaging # create empty vector of necessary shape # every "pixel" has 2 coordinates pos = np.empty((2 * xdim * ydim, 2), dtype=np.float32) # create coordinate spacing for x-y # double the num of elements by doubling x sampling xspace = np.linspace(0, stride_x*xdim, 2 * xdim, endpoint=False) yspace = np.linspace(0, stride_y*ydim, ydim, endpoint=False) xv, yv = np.meshgrid(xspace, yspace) # assign coordinates (pos) to all pixels pos[:, 0] = xv.flatten() pos[:, 1] = yv.flatten() # pixel midpoints are the first x-values of positions midpt = np.zeros((xdim * ydim, 2), dtype=np.float32) midpt[:, 0] = pos[0::2, 0]+(stride_x-1)/2 midpt[:, 1] = pos[0::2, 1]+(stride_y-1)/2 # rotate coordinates about midpoint to represent angle and length pos[0::2, 0] = midpt[:, 0] - (stride_x / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 0] pos[0::2, 1] = midpt[:, 1] - (stride_y / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 1] pos[1::2, 0] = midpt[:, 0] + (stride_x / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 0] pos[1::2, 1] = midpt[:, 1] + (stride_y / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 1] return pos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def proj_coords(coords, proj_in, proj_out): \n return [proj_coord(coord, proj_in, proj_out) for coord in coords]", "def coordinates_to_imgpts(x, y):\n pts = np.array([np.flipud(np.transpose(np.vstack([x, y])))])\n return pts", "def get_pix_coordinates(pts, proj_mat, w, h):\n points_t = np.ones(shap...
[ "0.71438277", "0.68676543", "0.68333745", "0.6638456", "0.66094863", "0.65278304", "0.6524649", "0.6480489", "0.64521027", "0.63857985", "0.63771254", "0.636936", "0.6328122", "0.6313248", "0.62968314", "0.62734854", "0.62452483", "0.6242931", "0.6173009", "0.6140261", "0.613...
0.6165102
19
To convert a list of coordinates of shape (ycenter, xcenter, yproj, xproj) into a list of coordinates Input coordinate of (N,4) becomes two output coordinates of (N,2)
def _convert_coords_to_coordinates(self, vect) -> np.ndarray: # create empty vector of necessary shape # one coordinate for each endpoint of the vector pos = np.empty((2 * len(vect), 2), dtype=np.float32) # create pairs of points pos[0::2, 0] = vect[:, 0] pos[1::2, 0] = vect[:, 0] pos[0::2, 1] = vect[:, 1] pos[1::2, 1] = vect[:, 1] # adjust second of each pair according to x-y projection pos[1::2, 0] += vect[:, 2] pos[1::2, 1] += vect[:, 3] return pos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def proj_coords(coords, proj_in, proj_out): \n return [proj_coord(coord, proj_in, proj_out) for coord in coords]", "def get_coordinate_lists(self, crs=None):\n x, y = self.vertices.vectors()[:2]\n if crs is not None and (crs != self.crs):\n x, y = _reproject((x,y), self.crs, crs)\n ...
[ "0.7183775", "0.6618891", "0.6581095", "0.652824", "0.64622325", "0.6417996", "0.63773674", "0.6365129", "0.63329947", "0.63210684", "0.6282502", "0.626608", "0.6254573", "0.6216185", "0.61284137", "0.61090726", "0.6102427", "0.6072899", "0.60686827", "0.6067336", "0.6059041"...
0.5870924
44
Calculates an average vector over a kernel
def averaging(self, value: int): self._averaging = value self.events.averaging() self._update_avg() self.refresh()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def avg(u: np.ndarray, v: np.ndarray) -> np.ndarray:\n \n return (u + v) / 2.0", "def kernels_bayes_average(g_y, w):\n return np.dot(g_y, w)", "def get_input_vector(img):\n parts = partition_matrix(blurmap(img), 10)\n return numpy.array([numpy.mean(part) for part in parts],\n ...
[ "0.6827808", "0.65671843", "0.6564396", "0.654793", "0.6526386", "0.63922465", "0.63613415", "0.6356005", "0.6356005", "0.6356005", "0.63499135", "0.63343555", "0.63088894", "0.63088894", "0.62725526", "0.6247251", "0.62032187", "0.6195959", "0.6177069", "0.61551315", "0.6155...
0.0
-1
Method for calculating average Implemented ONLY for imagelike vector data
def _update_avg(self): if self._data_type == 'coords': # default averaging is supported only for 'matrix' dataTypes return elif self._data_type == 'image': x, y = self._averaging, self._averaging if (x,y) == (1, 1): self.vectors = self._original_data # calling original data return tempdat = self._original_data range_x = tempdat.shape[0] range_y = tempdat.shape[1] x_offset = int((x - 1) / 2) y_offset = int((y - 1) / 2) kernel = np.ones(shape=(x, y)) / (x*y) output_mat = np.zeros_like(tempdat) output_mat_x = signal.convolve2d(tempdat[:, :, 0], kernel, mode='same', boundary='wrap') output_mat_y = signal.convolve2d(tempdat[:, :, 1], kernel, mode='same', boundary='wrap') output_mat[:, :, 0] = output_mat_x output_mat[:, :, 1] = output_mat_y self.vectors = (output_mat[x_offset:range_x-x_offset:x, y_offset:range_y-y_offset:y])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def average(data):\n return np.average(data)", "def avg(vector):\n if len(vector) == 0:\n return 0\n return sum(vector) / len(vector)", "def _calc_avg_img(self, data: Union[Sequence[np.ndarray],\n Sequence[Sequence[np.ndarray]]]\n ) ->...
[ "0.7302006", "0.7168423", "0.7156485", "0.71019864", "0.694467", "0.6885104", "0.6851713", "0.6831221", "0.6690518", "0.6667299", "0.65953726", "0.65504265", "0.64968616", "0.6474742", "0.64647216", "0.6450318", "0.6422171", "0.6420864", "0.64044243", "0.63954943", "0.6390917...
0.7117909
3
width of the line in pixels widths greater than 1px only guaranteed to work with "agg" method
def width(self, width: Union[int, float]): self._width = width vertices, triangles = self._generate_meshes(self.vectors, self._width) self._mesh_vertices = vertices self._mesh_triangles = triangles self.events.width() self.refresh()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def line_width(et: pd.DataFrame, lw_by: Hashable):\n if lw_by is not None:\n return encodings.data_linewidth(et[lw_by], et[lw_by])\n return pd.Series([1] * len(et), name=\"lw\")", "def get_line_width(self):\n return self.lwidth", "def line_width(self, width=2.0):\n self._impl.line_wi...
[ "0.67635524", "0.64742523", "0.6465246", "0.639043", "0.639043", "0.61956435", "0.6169401", "0.61666083", "0.6136135", "0.6110745", "0.6110745", "0.6110745", "0.602749", "0.602131", "0.5990465", "0.5978512", "0.59784377", "0.59361607", "0.5847792", "0.5825186", "0.5816536", ...
0.0
-1
Change the length of all lines
def length(self, length: Union[int, float]): self._length = length self._update_length() self.events.length() self.refresh()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setLength(self, new_length):\n\n self.length = new_length", "def set_max_lines(self, n):\n\t\tself._maxLines = n\n\t\tself._trunc_lines()", "def set_line_width(self, val):\n self.lwidth = val", "def change_tail_length(self, value):\n self.layer.tail_length = value", "def longlines(...
[ "0.67900187", "0.66235274", "0.6572527", "0.64778185", "0.630202", "0.62923074", "0.62599736", "0.62461704", "0.62156516", "0.6177423", "0.6118437", "0.6068556", "0.6058712", "0.60584587", "0.60579914", "0.5999391", "0.5994094", "0.5986776", "0.59459186", "0.5893659", "0.5886...
0.5355139
86
Method for calculating vector lengths Implemented ONLY for imagelike vector data
def _update_length(self): if self._data_type == 'coords': return "length adjustment not allowed for coordinate-style data" elif self._data_type == 'image': self._vectors = self._convert_to_vector_type(self._current_data) vertices, triangles = self._generate_meshes(self.vectors, self.width) self._mesh_vertices = vertices self._mesh_triangles = triangles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def length_vector(v):\n return sqrt(dot_vectors(v, v))", "def veclength(vec):\n vec = np.array(vec, copy=False).reshape(-1, 3)\n return np.sqrt(np.einsum('ij,ij->i', vec, vec))", "def vec_len(x):\r\n \r\n length = math.sqrt(x[0]**2 + x[1]**2)\r\n return length", "def get_vector_length(vecto...
[ "0.69771564", "0.6968557", "0.69287777", "0.68751884", "0.68681043", "0.6843323", "0.67979896", "0.6776573", "0.6688724", "0.66632444", "0.6648538", "0.6634671", "0.6530027", "0.65078795", "0.64724904", "0.6456348", "0.64492345", "0.6435357", "0.6399516", "0.6285586", "0.6256...
0.5496276
84
Fully refresh the underlying visual.
def _refresh(self): self._need_display_update = True self._update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redraw(self):\n self.vispy_viewer.canvas.update()", "def refresh(self):\n\t\tself.win.refresh()\n\t\tfor c in self.components:\n\t\t\tc.refresh()", "def refresh(self):\n self.getWindow().getDecorView().postInvalidate()", "def update_visualization(self) -> None:\n pass", "def redraw...
[ "0.7797752", "0.76777506", "0.7624245", "0.7607786", "0.75885034", "0.74661344", "0.74655384", "0.74655384", "0.7456086", "0.7437477", "0.7407238", "0.7366163", "0.7329085", "0.7314451", "0.722998", "0.7220857", "0.7200671", "0.7197712", "0.7181533", "0.71769416", "0.7154286"...
0.80525124
0
Generates list of mesh vertices and triangles from a list of vectors
def _generate_meshes(self, vectors, width): centers = np.repeat(vectors, 2, axis=0) offsets = segment_normal(vectors[::2, :], vectors[1::2, :]) offsets = np.repeat(offsets, 4, axis=0) signs = np.ones((len(offsets), 2)) signs[::2] = -1 offsets = offsets*signs vertices = centers + width*offsets/2 triangles = np.array([[2*i, 2*i+1, 2*i+2] if i % 2 == 0 else [2*i-1, 2*i, 2*i+1] for i in range(len(vectors))]).astype(np.uint32) return vertices, triangles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vertices(tri, vertex_list):\n dim = len(vertex_list[0])\n p = numpy.zeros((3, dim))\n for j in range(3):\n p[j] = vertex_list[tri[j]]\n return p", "def get_vertices(self):\n vertices = []\n V = [[-self.base_vectors[:,n], self.base_vectors[:,n]] for n in range(self.base_vector...
[ "0.70674706", "0.67971975", "0.6688014", "0.6595674", "0.65747494", "0.65282524", "0.6382806", "0.63815814", "0.63469017", "0.623005", "0.62253344", "0.6019658", "0.60096925", "0.60038024", "0.6003564", "0.59902227", "0.59812725", "0.5978787", "0.59492654", "0.59366006", "0.5...
0.6878562
1
Update the underlying visual.
def _update(self): if self._need_display_update: self._need_display_update = False self._set_view_slice(self.viewer.dims.indices) if self._need_visual_update: self._need_visual_update = False self._node.update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_visualization(self) -> None:\n pass", "def update(self):\n self.redraw()\n self._changed = False", "def update(self):\n self.redraw()\n self._changed = False", "def update_figure(self):\n\n self.draw()", "def redraw(self):\n self.vispy_viewer.canv...
[ "0.80167747", "0.767551", "0.767551", "0.7650333", "0.7572356", "0.7398728", "0.735658", "0.73552585", "0.7344015", "0.7229653", "0.71321124", "0.7061494", "0.70430195", "0.7035277", "0.70231766", "0.69140226", "0.68243873", "0.6794981", "0.67679983", "0.6736001", "0.6723893"...
0.72630036
9
Sets the view given the indices to slice with.
def _set_view_slice(self, indices):
    """Push the current mesh into the visual node for the given slice.

    Parameters
    ----------
    indices : sequence
        Dims indices to slice with.  NOTE(review): unused here — the
        full mesh is always displayed; confirm against the caller.
    """
    faces = self._mesh_triangles
    if len(faces) > 0:
        # Column order is reversed to match the visual's coordinate
        # convention.
        self._node.set_data(
            vertices=self._mesh_vertices[:, ::-1],
            faces=faces,
            color=self.color,
        )
    else:
        # Nothing to draw: clear any previously set mesh.
        self._node.set_data(vertices=None, faces=None)
    self._need_visual_update = True
    self._update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, indices: Tuple[int, ...], slices: Tuple[slice, ...] = (slice(0, 0, 0),)):\n self.indices = indices\n self.slices = slices", "def _set_neighs_slice(self, key):\n ## Condition to use slice type\n self._constant_neighs = True\n self.ks = range(1) if self.ks is N...
[ "0.62833464", "0.61686224", "0.6001683", "0.58963263", "0.5595757", "0.5592054", "0.5561091", "0.55347645", "0.5515838", "0.55009526", "0.5443674", "0.5426867", "0.5424508", "0.53574157", "0.52848315", "0.52843475", "0.5270382", "0.5265261", "0.52636987", "0.5232416", "0.5228...
0.77993363
0
Utilities for spatio temporal analysis zed.uchicago.edu Utilities for spatio temporal analysis zed.uchicago.edu Given location tile boundaries and type category filter, creates the corresponding timeseries as a pandas DataFrame
def getTS(self, _types=None, tile=None):
    """Build the event-count time series for one spatial tile.

    Parameters
    ----------
    _types : list or None
        Event categories to keep; used only when ``self._value_limits``
        is None, otherwise events are filtered by value range.
    tile : sequence of four floats
        ``(lat_min, lat_max, lon_min, lon_max)`` tile boundaries.

    Returns
    -------
    pd.DataFrame
        Single-row frame of per-interval event counts, named after the
        tile boundaries and type filter, columns ``self._trng[:-1]``.
    """
    assert self._END is not None
    series_name = '#'.join(str(x) for x in tile) + "#" + stringify(_types)
    lat_bounds = tile[0:2]
    lon_bounds = tile[2:4]

    # Select events either by category membership or by value range.
    if self._value_limits is None:
        event_mask = self._logdf[self._EVENT].isin(_types)
    else:
        event_mask = self._logdf[self._EVENT].between(
            self._value_limits[0], self._value_limits[1])
    df = self._logdf[self._columns].loc[event_mask] \
        .sort_values(by=self._DATE).dropna()

    # Keep only events inside the tile (exclusive lower bound,
    # inclusive upper bound).
    in_tile = ((df[self._coord1] > lat_bounds[0])
               & (df[self._coord1] <= lat_bounds[1])
               & (df[self._coord2] > lon_bounds[0])
               & (df[self._coord2] <= lon_bounds[1]))
    df = df.loc[in_tile]

    # Count events falling into each interval of the precomputed range.
    df.index = df[self._DATE]
    df = df[[self._EVENT]]
    counts = [df.loc[self._trng[i]:self._trng[i + 1]].size
              for i in np.arange(self._trng.size - 1)]
    return pd.DataFrame(counts, columns=[series_name],
                        index=self._trng[:-1]).transpose()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter(df: pd.DataFrame = pd.DataFrame()):\n if df.empty:\n df = read()\n print('Filtering data...')\n df = df.dropna()\n df2 = pd.DataFrame()\n df2['Longitude'] = df['Longitude']\n df2['Latitude'] = df['Latitude']\n df2['Month'] = df['Date'].dt.strftime('%m').astype(int)\n df2['...
[ "0.6118439", "0.579679", "0.5540201", "0.55036247", "0.5491236", "0.5480218", "0.5434643", "0.54258335", "0.54027617", "0.5400543", "0.53993887", "0.539527", "0.5395043", "0.53789306", "0.53724855", "0.5362963", "0.5352141", "0.53505415", "0.5327444", "0.5325912", "0.53214043...
0.5729195
2
Utilities for spatio temporal analysis zed.uchicago.edu Creates DataFrame of location tiles and their respective timeseries from input datasource with significance threshold THRESHOLD latitude, longitude coordinate boundaries given by LAT, LON calls on getTS for individual tile then concats them together
def timeseries(self, LAT, LON, EPS, _types, CSVfile='TS.csv', THRESHOLD=None):
    """Create a DataFrame of per-tile time series over a lat/lon grid.

    Calls ``getTS`` for each ``EPS``-sized tile spanned by ``LAT`` x
    ``LON``, concatenates the rows, and drops series that are nonzero
    in fewer than ``THRESHOLD`` of the time bins.  The result is cached
    in ``self._ts_dict`` and optionally written to ``CSVfile``.

    Parameters
    ----------
    LAT, LON : iterable of float
        Lower tile boundaries along each coordinate.
    EPS : float
        Tile edge length.
    _types : list or None
        Event type filter forwarded to ``getTS``.
    CSVfile : str or None
        Output CSV path; pass None to skip writing.
    THRESHOLD : float or None
        Significance threshold; falls back to ``self._THRESHOLD`` or 0.1.
    """
    if THRESHOLD is None:
        THRESHOLD = 0.1 if self._THRESHOLD is None else self._THRESHOLD

    if self._trng is None:
        self._trng = pd.date_range(start=self._INIT,
                                   end=self._END, freq=self._FREQ)

    stacked = pd.concat(
        [self.getTS(tile=[lat, lat + EPS, lon, lon + EPS], _types=_types)
         for lat in tqdm(LAT) for lon in tqdm(LON)])

    # Fraction of time bins in which each tile's series is nonzero.
    n_bins = float(pd.date_range(start=self._INIT,
                                 end=self._END, freq=self._FREQ).size)
    nonzero_frac = stacked.astype(bool).sum(axis=1) / n_bins
    stacked = stacked.loc[nonzero_frac > THRESHOLD]

    self._ts_dict[repr(_types)] = stacked
    if CSVfile is not None:
        stacked.to_csv(CSVfile, sep=' ')
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def outliers_solid_tsds():\n timestamp1 = np.arange(np.datetime64(\"2021-01-01\"), np.datetime64(\"2021-02-10\"))\n target1 = [np.sin(i) for i in range(len(timestamp1))]\n target1[10] += 10\n\n timestamp2 = np.arange(np.datetime64(\"2021-01-01\"), np.datetime64(\"2021-02-10\"))\n target2 = [np.sin(i...
[ "0.63389176", "0.61668277", "0.60331327", "0.59823436", "0.58627486", "0.5774627", "0.5769538", "0.575066", "0.574341", "0.5693412", "0.5659913", "0.5631676", "0.5619256", "0.5589875", "0.5581725", "0.5581672", "0.55765", "0.55755305", "0.5565926", "0.5553959", "0.5542568", ...
0.5494842
26
Utilities for spatio temporal analysis zed.uchicago.edu Fit dataproc with specified grid parameters and create timeseries for date boundaries specified by INIT, THRESHOLD, and END which do not have to match the arguments first input to the dataproc
def fit(self, grid=None, INIT=None, END=None, THRESHOLD=None, csvPREF='TS'):
    """Fit the processor with grid parameters and build all time series.

    Parameters
    ----------
    grid : dict or None
        Grid spec; must contain ``self._coord1``, ``self._coord2`` and
        ``'Eps'`` keys.  Overrides the stored grid when given.
    INIT, END : str or None
        Optional date-boundary overrides for the stored values.
    THRESHOLD : float or None
        Significance threshold forwarded to ``timeseries``.
    csvPREF : str
        Prefix for the output CSV file names.
    """
    # Optionally override stored date boundaries and grid.
    if INIT is not None:
        self._INIT = INIT
    if END is not None:
        self._END = END
    if grid is not None:
        self._grid = grid

    assert self._END is not None
    assert self._coord1 in self._grid
    assert self._coord2 in self._grid
    assert 'Eps' in self._grid

    lat = self._grid[self._coord1]
    lon = self._grid[self._coord2]
    eps = self._grid['Eps']

    if self._types is None:
        # Value-limit mode: one unfiltered series set.
        assert self._value_limits is not None
        self.timeseries(lat, lon, eps, None,
                        CSVfile=csvPREF + '.csv',
                        THRESHOLD=THRESHOLD)
        return
    # One series set per configured type group.
    for key in self._types:
        self.timeseries(lat, lon, eps, key,
                        CSVfile=csvPREF + stringify(key) + '.csv',
                        THRESHOLD=THRESHOLD)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_timeseries(xdates, ydata):\n\n pass", "def fit_surface(tdata,\n scale_params=[40.0, 1.25]):\n agg_tdata = aggregate_data(tdata)\n\n # unpack data\n mt = agg_tdata.mean_temp\n dt = agg_tdata.daily_temp\n scaled_dt = utils.scale_daily_temp(mt, dt, scale_params)\n\n obs_m...
[ "0.6216409", "0.61393374", "0.60502726", "0.59144783", "0.5902388", "0.56675595", "0.56113106", "0.55587673", "0.55573833", "0.5554575", "0.55389374", "0.55208224", "0.55186224", "0.54923666", "0.5484279", "0.5448603", "0.5447749", "0.5442355", "0.5438024", "0.5410568", "0.54...
0.6764931
0
Utilities for spatio temporal analysis zed.uchicago.edu Pulls new entries from datasource
def pull(self, domain="data.cityofchicago.org", dataset_id="crimes",
         token="ZIgqoPrBu0rsvhRr7WfjyPOzW", store=True,
         out_fname="pull_df.p", pull_all=False):
    """Pull new entries from the Socrata datasource into ``self._logdf``.

    Parameters
    ----------
    domain : str
        Socrata domain to query.
    dataset_id : str
        Socrata dataset identifier.
    token : str
        Socrata app token.  NOTE(review): a real token is hard-coded as
        the default — move it to configuration/environment.
    store : bool
        If True, pickle the updated dataframe to ``out_fname``.
    out_fname : str
        Pickle path used when ``store`` is True.
    pull_all : bool
        If True, replace the stored log with a full re-download;
        otherwise fetch only records newer than the latest stored date.
    """
    # Removed leftover pdb.set_trace() debugging breakpoint.
    client = Socrata(domain, token)
    if domain == "data.cityofchicago.org" and dataset_id == "crimes":
        # Chicago crimes schema column names.
        self._coord1 = "latitude"
        self._coord2 = "longitude"
        self._EVENT = "primary_type"
    if pull_all:
        new_data = client.get(dataset_id)
        self._logdf = pd.DataFrame(new_data).dropna() \
                        .sort_values(self._DATE)
    else:
        # Assign the sort result back so the last row really holds the
        # most recent date (the original discarded sort_values' return).
        self._logdf = self._logdf.sort_values(self._DATE)
        pull_after_date = "'" + str(
            self._logdf[self._DATE].iloc[-1]).replace(" ", "T") + "'"
        new_data = client.get(dataset_id,
                              where=("date > " + pull_after_date))
        if domain == "data.cityofchicago.org" and dataset_id == "crimes":
            self._DATE = "date"
        pull_df = pd.DataFrame(new_data).dropna() \
                    .sort_values(self._DATE)
        # DataFrame.append was removed in pandas 2.0; concat instead.
        self._logdf = pd.concat([self._logdf, pull_df])
    if store:
        assert out_fname is not None, "Out filename not specified"
        self._logdf.to_pickle(out_fname)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_data(self, datasource):\n import pandas as pd\n if not isinstance(datasource, pd.DataFrame):\n raise TypeError('DfFeature must loaded from pd.DataFrame')\n self.data = datasource\n self.data['thetime']=self.data['thetime'].apply(lambda x:try_to_parse_date(x))", "d...
[ "0.6014438", "0.5713395", "0.5670081", "0.5654937", "0.55900925", "0.5535862", "0.55139977", "0.5509106", "0.5435431", "0.54309124", "0.54095095", "0.53560346", "0.53487986", "0.5339302", "0.53324765", "0.5331317", "0.5316162", "0.5309659", "0.52839315", "0.52791053", "0.5277...
0.5656978
3