query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Helper function to create a basic operator node that doesn't contain op specific attrs
def create_basic_op_node(op_name, node, kwargs): name, input_nodes, _ = get_inputs(node, kwargs) node = onnx.helper.make_node( op_name, input_nodes, [name], name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def op(self) -> Literal[\"==\"] | Literal[\"<=\"] | Literal[\">=\"]:\n ...", "def...
[ "0.63253284", "0.632039", "0.62180704", "0.62112194", "0.6123615", "0.61039203", "0.61005336", "0.6060044", "0.600828", "0.6002395", "0.60003734", "0.595907", "0.5952342", "0.5919387", "0.59105414", "0.5905878", "0.589422", "0.58619446", "0.58419603", "0.58360064", "0.5822752...
0.7472675
0
Helper function to convert weights and inputs.
def convert_weights_and_inputs(node, **kwargs): name, _, _ = get_inputs(node, kwargs) if kwargs["is_input"] is False: weights = kwargs["weights"] initializer = kwargs["initializer"] np_arr = weights[name] data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np_arr.dtype] dims = np.shape(np_arr) tensor_node = onnx.helper.make_tensor_value_info(name, data_type, dims) initializer.append( onnx.helper.make_tensor( name=name, data_type=data_type, dims=dims, vals=np_arr.flatten().tolist(), raw=False, ) ) return [tensor_node] else: tval_node = onnx.helper.make_tensor_value_info(name, kwargs["in_type"], kwargs["in_shape"]) return [tval_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _input(self, inputs):\n return sum([w*i for w,i in zip(self._weights, inputs)])", "def normalize_input(inputs: [float]) -> [float]:", "def inputs_weights_init(self):\n input_user, input_item, input_rating = self.inputs_init()\n user_embeddings, item_embeddings = self.embeddings_layers_...
[ "0.68360084", "0.64592713", "0.6424411", "0.6227467", "0.6222293", "0.61991525", "0.618634", "0.61317295", "0.61289394", "0.6118224", "0.6114964", "0.6105876", "0.6105316", "0.60550016", "0.6040807", "0.6006805", "0.60003716", "0.5966255", "0.5961394", "0.59462756", "0.592056...
0.7170687
0
Map MXNet's convolution operator attributes to onnx's Conv operator and return the created node.
def convert_convolution(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) kernel_dims = list(parse_helper(attrs, "kernel")) stride_dims = list(parse_helper(attrs, "stride", [1, 1])) pad_dims = list(parse_helper(attrs, "pad", [0, 0])) num_group = int(attrs.get("num_group", 1)) dilations = list(parse_helper(attrs, "dilate", [1, 1])) pad_dims = pad_dims + pad_dims conv_node = onnx.helper.make_node( "Conv", inputs=input_nodes, outputs=[name], kernel_shape=kernel_dims, strides=stride_dims, dilations=dilations, pads=pad_dims, group=num_group, name=name ) return [conv_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_conv_pool(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n k = [op.handle.kernel_h, op.handle.kernel_w]\n s = [op.handle.stride_h, op.handle.stride_w]\n oddp = op.odd_padding\n p = [\n op.handle.pad_h + oddp[0],\n op.h...
[ "0.69920486", "0.66528916", "0.6432573", "0.59314674", "0.59063995", "0.5811342", "0.5794324", "0.57784176", "0.57155585", "0.5674647", "0.5611912", "0.5560294", "0.552098", "0.5466808", "0.5450267", "0.54139715", "0.5381454", "0.538118", "0.53697866", "0.5363581", "0.5358975...
0.7379639
0
Map MXNet's deconvolution operator attributes to onnx's ConvTranspose operator and return the created node.
def convert_deconvolution(node, **kwargs): name, inputs, attrs = get_inputs(node, kwargs) kernel_dims = list(parse_helper(attrs, "kernel")) stride_dims = list(parse_helper(attrs, "stride", [1, 1])) pad_dims = list(parse_helper(attrs, "pad", [0, 0])) num_group = int(attrs.get("num_group", 1)) dilations = list(parse_helper(attrs, "dilate", [1, 1])) adj_dims = list(parse_helper(attrs, "adj", [0, 0])) pad_dims = pad_dims + pad_dims deconv_node = onnx.helper.make_node( "ConvTranspose", inputs=inputs, outputs=[name], kernel_shape=kernel_dims, strides=stride_dims, dilations=dilations, output_padding=adj_dims, pads=pad_dims, group=num_group, name=name ) return [deconv_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_transpose(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('perm', op.perm),\n ])\n return node", "def deconv(inp):\n num_filters = inp.get_shape().as_list()[-1]\n\n x = Conv2DTran...
[ "0.6097374", "0.601943", "0.58937025", "0.58428407", "0.57710683", "0.5762493", "0.56841415", "0.55704516", "0.55147016", "0.55106026", "0.5437586", "0.5421498", "0.53722894", "0.53698725", "0.53527224", "0.5335422", "0.5334541", "0.53216076", "0.5319174", "0.5318343", "0.530...
0.790625
0
Map MXNet's crop operator attributes to onnx's Crop operator and return the created node.
def convert_crop(node, **kwargs): name, inputs, attrs = get_inputs(node, kwargs) start = np.array([0, 0, 0, 0], dtype=np.int) # index是int类型 export_nodes = [] start_node = create_helper_tensor_node(start, name + '__starts', kwargs) export_nodes.extend(start_node) start_node = start_node[-1].name shape_node = create_helper_shape_node(inputs[1], inputs[1] + '__shape') export_nodes.extend(shape_node) shape_node = shape_node[-1].name crop_node = onnx.helper.make_node( "Slice", inputs=[inputs[0], name + '__starts', inputs[1] + '__shape'], # data、start、end outputs=[name], name=name ) logging.warning( "Using an experimental ONNX operator: Crop. " \ "Its definition can change.") export_nodes.extend([crop_node]) return export_nodes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_clip(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n if op.min is not None:\n node.input.append(op.name + \":min\")\n else:\n node.input.append(\"\")\n if op.max is not None:\n node.input.append(op.name + \":max\")\...
[ "0.5632038", "0.55095476", "0.5424357", "0.5367828", "0.5309837", "0.5078018", "0.5071087", "0.49778563", "0.49666363", "0.491709", "0.4914572", "0.49061403", "0.49024212", "0.4902058", "0.4891922", "0.48729894", "0.48681563", "0.4843361", "0.4805982", "0.47791198", "0.475751...
0.63828367
0
Map MXNet's UpSampling operator attributes to onnx's Upsample operator and return the created node.
def convert_upsample(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) sample_type = attrs.get('sample_type', 'nearest') sample_type = 'linear' if sample_type == 'bilinear' else sample_type scale = convert_string_to_list(attrs.get('scale')) scaleh = scalew = float(scale[0]) if len(scale) > 1: scaleh = float(scale[0]) scalew = float(scale[1]) scale = np.array([1.0, 1.0, scaleh, scalew], dtype=np.float32) roi = np.array([], dtype=np.float32) export_nodes = [] node_roi = create_helper_tensor_node(roi, name + 'roi', kwargs) export_nodes.extend(node_roi) node_roi = node_roi[-1].name node_sca = create_helper_tensor_node(scale, name + 'scale', kwargs) export_nodes.extend(node_sca) node_sca = node_sca[-1].name node = onnx.helper.make_node( 'Resize', inputs=[input_nodes[0], node_roi, node_sca], outputs=[name], coordinate_transformation_mode='asymmetric', mode=sample_type, nearest_mode='floor', name=name ) export_nodes.extend([node]) return export_nodes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_upsample_layer(self, in_channels=None, out_channels=None):\n if self.expand_strategy == \"upsample\":\n return nn.Upsample(scale_factor=2, mode=\"nearest\")\n elif self.expand_strategy == \"transpose_convolution\":\n return nn.ConvTranspose2d(\n in_channe...
[ "0.5554982", "0.5521366", "0.54900855", "0.5459652", "0.5323029", "0.5225662", "0.5160975", "0.51450866", "0.5143616", "0.5027456", "0.5023463", "0.49990726", "0.49227133", "0.48704767", "0.48497036", "0.48493198", "0.48339114", "0.47982448", "0.47692373", "0.4767733", "0.475...
0.64564914
0
Map MXNet's FullyConnected operator attributes to onnx's Gemm operator and return the created node.
def convert_fully_connected(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) initializer = kwargs["initializer"] no_bias = get_boolean_attribute_value(attrs, "no_bias") fcnode = [] op_name = "flatten_" + str(kwargs["idx"]) flatten_node = onnx.helper.make_node( 'Flatten', inputs=[input_nodes[0]], outputs=[op_name], name=op_name ) input_nodes[0] = op_name fcnode.append(flatten_node) if no_bias: data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')] bias_name = "bias" + str(kwargs["idx"]) tensor_node = onnx.helper.make_tensor_value_info(bias_name, data_type, (1,)) initializer.append( onnx.helper.make_tensor( name=bias_name, data_type=data_type, dims=(1,), vals=[0], raw=False, ) ) input_nodes.append(bias_name) fcnode.append(tensor_node) node = onnx.helper.make_node( "Gemm", input_nodes, # input (A, B, C) - C can be in place [name], # output alpha=1.0, beta=1.0, transA=False, transB=True, name=name ) fcnode.append(node) return fcnode
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n ...
[ "0.6879449", "0.5942417", "0.58318627", "0.5746459", "0.5413726", "0.53791827", "0.5367006", "0.5331266", "0.527826", "0.5257274", "0.52468914", "0.5242588", "0.5240532", "0.51549006", "0.515457", "0.51430964", "0.5114177", "0.5093029", "0.50854313", "0.5052347", "0.50358003"...
0.5959116
1
Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization operator and return the created node.
def convert_batchnorm(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) momentum = float(attrs.get("momentum", 0.9)) eps = float(attrs.get("eps", 0.001)) bn_node = onnx.helper.make_node( "BatchNormalization", input_nodes, [name], name=name, epsilon=eps, momentum=momentum, # MXNet computes mean and variance per channel for batchnorm. # Default for onnx is across all spatial features. Relying on default # ONNX behavior of spatial=1 for ONNX opset 8 and below. As the spatial # attribute is deprecated in opset 9 and above, not explicitly encoding it. ) return [bn_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_batchnorm(cls, op, op_t):\n # first, we init batchnorm node\n epsilon = 1e-5 # the epsilon value used in singa\n bn_node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n bn_node.attribute.extend([\n helper.make_attribute('momentum', op.handle.factor),\n ...
[ "0.6464468", "0.6376962", "0.61264753", "0.5883445", "0.57059985", "0.57012653", "0.56393033", "0.5572092", "0.55718195", "0.551708", "0.54826975", "0.5470176", "0.5425131", "0.5422429", "0.54030514", "0.5381643", "0.5354029", "0.5310859", "0.52805203", "0.5256326", "0.522801...
0.7146321
0
Map MXNet's tanh operator attributes to onnx's Tanh operator and return the created node.
def convert_tanh(node, **kwargs): return create_basic_op_node('Tanh', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tanh(self):\n return type(self)(self.parent(),\n self._simplify(self._express.tanh()))", "def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.a...
[ "0.66637284", "0.66221046", "0.6183689", "0.6041877", "0.6009536", "0.60078925", "0.6001827", "0.59675324", "0.59085935", "0.5895642", "0.5892149", "0.58851", "0.5857893", "0.5766065", "0.5756888", "0.5718928", "0.56849617", "0.5675731", "0.56675726", "0.566487", "0.5653042",...
0.7406357
0
Map MXNet's cos operator attributes to onnx's Cos operator and return the created node.
def convert_cos(node, **kwargs): return create_basic_op_node('Cos', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cos(self):\n return type(self)(self.parent(),\n self._simplify(self._express.cos()))", "def convert_acos(node, **kwargs):\n return create_basic_op_node('Acos', node, kwargs)", "def cos(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.cos(obj.val)\n\t\tder = -np.sin(ob...
[ "0.68186814", "0.64716417", "0.6203887", "0.6197016", "0.6076361", "0.60689795", "0.6031383", "0.5917666", "0.5856112", "0.5803058", "0.5774136", "0.57586396", "0.5689945", "0.56162506", "0.56064403", "0.5498463", "0.5487711", "0.5412239", "0.53912306", "0.53586626", "0.53557...
0.77887577
0
Map MXNet's sin operator attributes to onnx's Sin operator and return the created node.
def convert_sin(node, **kwargs): return create_basic_op_node('Sin', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sin(self):\n return type(self)(self.parent(),\n self._simplify(self._express.sin()))", "def sin(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.sin(obj.val)\n\t\tder = np.cos(obj.val)\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder =...
[ "0.6925629", "0.63005567", "0.62578577", "0.6207433", "0.60740703", "0.5979021", "0.5886588", "0.5854158", "0.5850431", "0.58287054", "0.57712215", "0.5768085", "0.57123333", "0.56800777", "0.55576193", "0.54214805", "0.54193765", "0.5405628", "0.53958225", "0.53906035", "0.5...
0.7659397
0
Map MXNet's tan operator attributes to onnx's tan operator and return the created node.
def convert_tan(node, **kwargs): return create_basic_op_node('Tan', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_tanh(node, **kwargs):\n return create_basic_op_node('Tanh', node, kwargs)", "def tan(self):\n return type(self)(self.parent(),\n self._simplify(self._express.tan()))", "def tan(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.tan(obj.val)\n\t\tder = 1+np.tan(o...
[ "0.7066365", "0.69418204", "0.62932664", "0.6291011", "0.6098471", "0.6070739", "0.60638016", "0.6008856", "0.595648", "0.5951899", "0.581182", "0.57857805", "0.57706344", "0.5729434", "0.5726495", "0.5701578", "0.56677294", "0.5623807", "0.56201595", "0.560823", "0.5586857",...
0.760391
0
Map MXNet's acos operator attributes to onnx's acos operator and return the created node.
def convert_acos(node, **kwargs): return create_basic_op_node('Acos', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_cos(node, **kwargs):\n return create_basic_op_node('Cos', node, kwargs)", "def create_dot1p_to_cos_mapping(self, ports, rx_attr_flag=False, **kwargs):\n pass", "def arccos(self):\n return type(self)(self.parent(),\n self._simplify(self._express.arccos()))",...
[ "0.66714954", "0.5832328", "0.5820055", "0.54579884", "0.5427718", "0.54265505", "0.5377508", "0.5375877", "0.5216904", "0.5200265", "0.5194074", "0.51886547", "0.51454943", "0.50762826", "0.50367916", "0.50269526", "0.50130564", "0.50080115", "0.498638", "0.49777353", "0.497...
0.72537535
0
Map MXNet's asin operator attributes to onnx's asin operator and return the created node.
def convert_asin(node, **kwargs): return create_basic_op_node('Asin', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onnx_node_to_singa_op(cls,\n onnx_node,\n inputs,\n opset_version=_known_opset_version):\n if onnx_node.op_type in cls._special_operators:\n translator = getattr(cls, cls._special_operators[onnx_node.op...
[ "0.5615745", "0.5525026", "0.5475919", "0.5348382", "0.52133733", "0.512875", "0.5086857", "0.50501585", "0.49973157", "0.49967095", "0.49640378", "0.49598986", "0.49447057", "0.49313256", "0.49242523", "0.4889783", "0.48892346", "0.48825702", "0.4869105", "0.48690882", "0.48...
0.68138564
0
Map MXNet's atan operator attributes to onnx's atan operator and return the created node.
def convert_atan(node, **kwargs): return create_basic_op_node('Atan', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_acos(node, **kwargs):\n return create_basic_op_node('Acos', node, kwargs)", "def atan (cls, x) :\n return Angle_R (math.atan (x))", "def convert_tan(node, **kwargs):\n return create_basic_op_node('Tan', node, kwargs)", "def convert_asin(node, **kwargs):\n return create_basic_op_no...
[ "0.65847087", "0.6237189", "0.6214043", "0.60928404", "0.6050979", "0.5992298", "0.5854942", "0.5778332", "0.5749585", "0.5689177", "0.56575197", "0.5505356", "0.5486537", "0.5469867", "0.54422086", "0.5406414", "0.5399801", "0.5374126", "0.5345535", "0.5344025", "0.53360784"...
0.78846675
0
Map MXNet's sigmoid operator attributes to onnx's Sigmoid operator and return the created node.
def convert_sigmoid(node, **kwargs): return create_basic_op_node('Sigmoid', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node", "def _create_hardsigmoid(cls, ...
[ "0.7370613", "0.7041682", "0.6546022", "0.648128", "0.6382078", "0.6377375", "0.6302575", "0.6283234", "0.6230437", "0.6167952", "0.6156127", "0.60304093", "0.59833264", "0.5979358", "0.5970586", "0.593866", "0.59101063", "0.5903408", "0.5903408", "0.5856696", "0.58536077", ...
0.7811297
0
Map MXNet's relu operator attributes to onnx's Relu operator and return the created node.
def convert_relu(node, **kwargs): return create_basic_op_node('Relu', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node", "def relu(input, inplace=False):\n return FunctionLib.apply(\n 'Relu', input.devic...
[ "0.6200267", "0.60108554", "0.57318294", "0.57223237", "0.55151415", "0.5508822", "0.55065703", "0.5483933", "0.5419298", "0.5369124", "0.53125054", "0.52744555", "0.52538085", "0.5234616", "0.5223587", "0.52179486", "0.51698667", "0.51464564", "0.51462084", "0.51363164", "0....
0.7071261
0
Map MXNet's Activation operator attributes to onnx's Tanh/Relu operator and return the created node.
def convert_activation(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) act_type = attrs["act_type"] # Creating a dictionary here, but if this titlecase pattern # mxnet_name.title() act_types = { "tanh": "Tanh", "relu": "Relu", "sigmoid": "Sigmoid", "softrelu": "Softplus", "softsign": "Softsign" } act_name = act_types.get(act_type) if act_name: node = onnx.helper.make_node( act_name, input_nodes, [name], name=name ) else: raise AttributeError( "Activation %s not implemented or recognized in the converter" % act_type ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node", "def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, ...
[ "0.6525348", "0.6170288", "0.6057955", "0.583563", "0.5773526", "0.5732154", "0.55703044", "0.5537313", "0.54962003", "0.5491791", "0.54823685", "0.548114", "0.54719126", "0.54137206", "0.5379066", "0.53318", "0.5318398", "0.53031856", "0.5283017", "0.52783245", "0.5272741", ...
0.6768296
0
Map MXNet's pad operator attributes to onnx's Pad operator and return the created node.
def convert_pad(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) mxnet_pad_width = convert_string_to_list(attrs.get("pad_width")) onnx_pad_width = transform_padding(mxnet_pad_width) pad_mode = attrs.get("mode") if pad_mode == "constant": pad_value = float(attrs.get("constant_value")) \ if "constant_value" in attrs else 0.0 node = onnx.helper.make_node( 'Pad', inputs=input_nodes, outputs=[name], mode='constant', value=pad_value, pads=onnx_pad_width, name=name ) else: node = onnx.helper.make_node( 'Pad', inputs=input_nodes, outputs=[name], mode=pad_mode, pads=onnx_pad_width, name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pads(onnx_node): # type: (NodeWrapper) -> Tuple[int, int, int]\n auto_pad = onnx_node.get_attribute_value('auto_pad')\n pads = onnx_node.get_attribute_value('pads', ()) # Padding along each axis\n kernel_shape = onnx_node.get_attribute_value('kernel_shape')\n\n # Attribute 'auto_pad' is depre...
[ "0.57889557", "0.5579466", "0.54904", "0.5398082", "0.52853227", "0.5275017", "0.5262226", "0.52524114", "0.51949155", "0.51621807", "0.5159108", "0.508906", "0.50823164", "0.5075376", "0.5003475", "0.49345222", "0.49055016", "0.49049303", "0.49036154", "0.4897707", "0.488882...
0.7317738
0
create extra tensor node from numpy values
def create_helper_tensor_node(input_vals, output_name, kwargs): data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[input_vals.dtype] tensor_node = onnx.helper.make_tensor_value_info( name=output_name, elem_type=data_type, shape=input_vals.shape ) kwargs["initializer"].append( onnx.helper.make_tensor( name=output_name, data_type=data_type, dims=input_vals.shape, vals=input_vals.flatten(), raw=False, ) ) return [tensor_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_constant(cls, onnx_node, inputs, opset_version):\n tmp_tensor = onnx_node.getattr('value')\n np_dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[tmp_tensor.data_type]\n np_tensor = np.frombuffer(tmp_tensor.raw_data, dtype=np_dtype)\n if np_tensor.dtype == \"int64\":\n ...
[ "0.6670378", "0.6648633", "0.63642025", "0.62701637", "0.6126209", "0.61022186", "0.6095189", "0.60797447", "0.6028599", "0.6016147", "0.6001126", "0.59771246", "0.59069496", "0.5865564", "0.5849665", "0.58315593", "0.5821287", "0.57957333", "0.57812244", "0.57678527", "0.572...
0.68433195
0
create extra reshape node with static shape
def create_helper_reshape_node(input_name, output_name, shape, kwargs): shape_tensor_node, = create_helper_tensor_node( np.asarray(shape, dtype=np.int64), output_name + "__shape", kwargs ) reshape_node = onnx.helper.make_node( "Reshape", inputs=[input_name, shape_tensor_node.name], outputs=[output_name], name=output_name ) return [shape_tensor_node, reshape_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_reshape(cls, op, op_t):\n # make the shape node\n # because the reshape in singa does not provide its shape as input tensor\n shape_node_name = op.name + \":shape\"\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n node.input.extend([shape_node_name])\n ...
[ "0.78215057", "0.7107456", "0.69461006", "0.67939496", "0.6732763", "0.6498097", "0.6490464", "0.6484981", "0.6472887", "0.6448186", "0.6354322", "0.6187885", "0.61778134", "0.6151803", "0.613624", "0.61251277", "0.6119176", "0.61077476", "0.6069696", "0.60536623", "0.6050781...
0.736639
1
create extra transpose node
def create_helper_trans_node(input_name, output_name, perm=None): attrs = {} if perm is not None: attrs['perm'] = perm trans_node = onnx.helper.make_node( 'Transpose', inputs=[input_name], outputs=[output_name], name=output_name, **attrs ) return [trans_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_transpose(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('perm', op.perm),\n ])\n return node", "def convert_transpose(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node,...
[ "0.78907984", "0.7416823", "0.73450136", "0.71588725", "0.7157814", "0.7062122", "0.7027833", "0.6752265", "0.67203134", "0.6479318", "0.64648455", "0.63611174", "0.62729925", "0.6166361", "0.6111399", "0.6110929", "0.60243756", "0.5987572", "0.5926361", "0.58813286", "0.5878...
0.6801901
7
create extra concat node
def create_helper_concat_node(inputs, output_name, axis=0): concat_node = onnx.helper.make_node( "Concat", inputs=inputs, outputs=[output_name], name=output_name, axis=axis, ) return [concat_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_concat(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get(\"dim\", 1))\n concat_node = onnx.helper.make_node(\n \"Concat\",\n input_nodes,\n [name],\n axis=axis,\n name=name\n )\n return [concat_node]", "de...
[ "0.7233822", "0.70961404", "0.6707305", "0.6454889", "0.64343643", "0.6412976", "0.6336564", "0.63213706", "0.60288745", "0.60278517", "0.59759504", "0.58231926", "0.57735884", "0.57064074", "0.5681187", "0.565455", "0.5654235", "0.5651357", "0.5630985", "0.5547733", "0.55437...
0.7367524
0
create extra expand node
def create_helper_expand_node(input_name, output_name, expand_shape): expand_node = onnx.helper.make_node( "Expand", inputs=[input_name, expand_shape], outputs=[output_name], name=output_name, ) return [expand_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutate_expand_node(\n child, node=None, pb_en_out_link=config.MUTPB_EN_OUT_LINK):\n # TODO: can maybe be improved by sparqling\n if not node:\n nodes = list(child.nodes)\n node = random.choice(nodes)\n new_triple, _, _ = _mutate_expand_node_helper(node, pb_en_out_link)\n return...
[ "0.6341064", "0.6288607", "0.62806153", "0.6086077", "0.5995333", "0.5976243", "0.59042525", "0.58790493", "0.58487433", "0.5824369", "0.5777672", "0.57740724", "0.5769714", "0.5760391", "0.5726708", "0.5703688", "0.56856406", "0.5679697", "0.5621045", "0.5600409", "0.5491694...
0.7317582
0
create extra gather node with static indices
def create_helper_gather_node( input_name, output_name, indices, kwargs, axis=None ): attrs = {} if axis is not None: attrs['axis'] = axis gather_tensor_node, = create_helper_tensor_node( np.asarray(indices, np.int64), output_name + "__indices", kwargs ) gather_node = onnx.helper.make_node( "Gather", inputs=[input_name, gather_tensor_node.name], outputs=[output_name], name=output_name, **attrs ) return [gather_tensor_node, gather_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_gather(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n node.input.append(op.name + \":indices\")\n return node", "def _create_gather(cls, onnx_node, inpu...
[ "0.71886444", "0.6769806", "0.62645483", "0.6221015", "0.5946375", "0.5838221", "0.5673979", "0.56684947", "0.5541291", "0.53054255", "0.52893925", "0.52365243", "0.5235921", "0.52214915", "0.5213144", "0.5213144", "0.5213144", "0.5213144", "0.5213144", "0.5213144", "0.520076...
0.64779496
2
create extra node, with specified values (allows mixing node names and static values)
def create_helper_build_values_node( inputs, output_name, dtype, kwargs, axis=0 ): values = [] tensor_nodes = [] for idx, inp in enumerate(inputs): if not isinstance(inp, (str, bytes)): inp, = create_helper_tensor_node( np.array([inp], dtype=dtype), output_name + "__value" + str(idx), kwargs ) tensor_nodes.append(inp) inp = inp.name values.append(inp) concat_node, = create_helper_concat_node(values, output_name, axis=axis) return tensor_nodes + [concat_node,]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_node(\n self,\n name,\n ):\n pass", "def createNode(*args, name: AnyStr=\"\", parent: AnyStr=\"\", shared: bool=True, skipSelect:\n bool=True, **kwargs)->AnyStr:\n pass", "def new_node(name):\n\n return name, []", "def add_node(self, node):"...
[ "0.68213207", "0.67030686", "0.6458598", "0.6282724", "0.61542964", "0.61530226", "0.61383206", "0.6117894", "0.6106326", "0.6085056", "0.6042397", "0.6037692", "0.6005436", "0.59107155", "0.59007114", "0.584901", "0.58368856", "0.58071303", "0.5780777", "0.5764826", "0.57229...
0.0
-1
create extra shape node for specified input node
def create_helper_shape_node(input_name, output_name): shape_node = onnx.helper.make_node( "Shape", inputs=[input_name], outputs=[output_name], name=output_name, ) return [shape_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_shape(node, **kwargs):\n return create_basic_op_node('Shape', node, kwargs)", "def add_input_and_output_shape(self, input_shape, output_shape):", "def ashape(node):\n shp = node.shape\n assert shp is not None\n return shp", "def _create_reshape(cls, op, op_t):\n # make the shap...
[ "0.6897857", "0.6676977", "0.65623456", "0.6395802", "0.6266121", "0.6219381", "0.6160772", "0.6141284", "0.6076785", "0.60708976", "0.60563433", "0.6043666", "0.6030497", "0.59740275", "0.59445137", "0.5934724", "0.5927032", "0.5918327", "0.59053826", "0.58898586", "0.585948...
0.7563421
0
Map MXNet's dot operator attributes to onnx's MatMul and Transpose operators based on the values set for transpose_a, transpose_b attributes.
def convert_dot(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) input_node_a = input_nodes[0] input_node_b = input_nodes[1] trans_a_node = None trans_b_node = None trans_a = get_boolean_attribute_value(attrs, "transpose_a") trans_b = get_boolean_attribute_value(attrs, "transpose_b") op_name = "transpose" + str(kwargs["idx"]) if trans_a: input_node_a = op_name + "_a" trans_a_node, = create_helper_trans_node(input_nodes[0], input_node_a) if trans_b: input_node_b = op_name + "_b" trans_b_node, = create_helper_trans_node(input_nodes[1], input_node_b) matmul_node = onnx.helper.make_node( 'MatMul', inputs=[input_node_a, input_node_b], outputs=[name], name=name ) if not trans_a and not trans_b: return [matmul_node] elif trans_a and not trans_b: return [trans_a_node, matmul_node] elif trans_b and not trans_a: return [trans_b_node, matmul_node] else: return [trans_a_node, trans_b_node, matmul_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transpose_dot(self, other):\n from divisi2 import operators\n return operators.transpose_dot(self, other)", "def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if con...
[ "0.5712867", "0.56176335", "0.554931", "0.5514312", "0.5504516", "0.54418373", "0.5436186", "0.54350364", "0.54332083", "0.5432765", "0.5416694", "0.53294843", "0.53082806", "0.5298505", "0.52948254", "0.5290505", "0.52547127", "0.52381", "0.5236047", "0.5189316", "0.5149108"...
0.6954135
0
Map MXNet's _linalg_gemm2 operator attributes to onnx's MatMul and Transpose operators based on the values set for transpose_a, transpose_b attributes. Return multiple nodes created.
def convert_linalg_gemm2(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) # Getting the attributes and assigning default values. alpha = float(attrs.get("alpha", 1.0)) trans_a = get_boolean_attribute_value(attrs, "transpose_a") trans_b = get_boolean_attribute_value(attrs, "transpose_b") op_name = "transpose" + str(kwargs["idx"]) if alpha == 1.0 and trans_a == 0 and trans_b == 0: matmul_node = onnx.helper.make_node( 'MatMul', inputs=input_nodes, outputs=[name], name=name ) return [matmul_node] elif trans_a == 1 and trans_b == 0: op_name = "transpose" + str(kwargs["idx"]) node_name = op_name+"_a" trans_a_node = onnx.helper.make_node( 'Transpose', inputs=[input_nodes[0]], outputs=[op_name+"_a"], name=node_name ) matmul_node = onnx.helper.make_node( 'MatMul', inputs=[node_name, input_nodes[1]], outputs=[name], name=name ) return [trans_a_node, matmul_node] elif trans_a == 0 and trans_b == 1: node_name = op_name + "_b" trans_b_node = onnx.helper.make_node( 'Transpose', inputs=[input_nodes[1]], outputs=[op_name+"_b"], name=node_name ) matmul_node = onnx.helper.make_node( 'MatMul', inputs=[input_nodes[0], node_name], outputs=[name], name=name ) return [trans_b_node, matmul_node] else: node_name_a = op_name+"_a" trans_a_node = onnx.helper.make_node( 'Transpose', inputs=[input_nodes[0]], outputs=[op_name+"_a"], name=node_name_a ) node_name_b = op_name + "_b" trans_b_node = onnx.helper.make_node( 'Transpose', inputs=[input_nodes[1]], outputs=[op_name+"_b"], name=node_name_b ) matmul_node = onnx.helper.make_node( 'MatMul', inputs=input_nodes, outputs=[name], name=name ) return [trans_a_node, trans_b_node, matmul_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n ...
[ "0.6389869", "0.62474555", "0.59834313", "0.5800283", "0.57034826", "0.56631166", "0.5639739", "0.542331", "0.5402771", "0.53584194", "0.53427106", "0.53198403", "0.5156288", "0.51285404", "0.50952226", "0.5079322", "0.50783587", "0.5072455", "0.5072435", "0.5067956", "0.5063...
0.7864781
0
Map MXNet's Pooling operator attributes to onnx's MaxPool/AveragePool/GlobalMaxPool/GlobalAveragePool operators based on the input node's attributes and return the created node.
def convert_pooling(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) kernel = eval(attrs["kernel"]) pool_type = attrs["pool_type"] if attrs.get("pool_type") else "max" stride = eval(attrs["stride"]) if attrs.get("stride") else (1, 1) global_pool = get_boolean_attribute_value(attrs, "global_pool") p_value = attrs.get('p_value', 'None') pooling_convention = attrs.get('pooling_convention', 'valid') ceil_mode = False if pooling_convention == 'full': if onnx.__version__ < "1.5.0": pooling_warning = "Pooling: ONNX lower than 1.5.0 doesn't support pooling_convention. " \ "This might lead to shape or accuracy issues. " \ "https://github.com/onnx/onnx/issues/549" ceil_mode = True logging.warning(pooling_warning) pad_dims = list(parse_helper(attrs, "pad", [0, 0])) pad_dims = pad_dims + pad_dims pool_types = {"max": "MaxPool", "avg": "AveragePool", "lp": "LpPool"} global_pool_types = {"max": "GlobalMaxPool", "avg": "GlobalAveragePool", "lp": "GlobalLpPool"} if pool_type == 'lp' and p_value == 'None': raise AttributeError('ONNX requires a p value for LpPool and GlobalLpPool') if global_pool: if pool_type == 'lp': node = onnx.helper.make_node( global_pool_types[pool_type], input_nodes, # input [name], p=int(p_value), name=name ) else: node = onnx.helper.make_node( global_pool_types[pool_type], input_nodes, # input [name], name=name ) else: if pool_type == 'lp': node = onnx.helper.make_node( pool_types[pool_type], input_nodes, # input [name], p=int(p_value), kernel_shape=kernel, pads=pad_dims, strides=stride, name=name ) else: if onnx.__version__ >= "1.5.0": node = onnx.helper.make_node( pool_types[pool_type], input_nodes, # input [name], kernel_shape=kernel, pads=pad_dims, strides=stride, name=name, ceil_mode=ceil_mode ) else: node = onnx.helper.make_node( pool_types[pool_type], input_nodes, # input [name], kernel_shape=kernel, pads=pad_dims, strides=stride, name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_roipooling(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n pooled_shape = convert_string_to_list(attrs.get('pooled_size'))\n scale = float(attrs.get(\"spatial_scale\"))\n\n node = onnx.helper.make_node(\n 'MaxRoiPool',\n input_nodes,\n [name],...
[ "0.6422184", "0.6282772", "0.61572933", "0.6030548", "0.59187293", "0.5839444", "0.5670332", "0.55067396", "0.546821", "0.53994155", "0.5370744", "0.5343035", "0.52772164", "0.5261299", "0.5253784", "0.52436227", "0.5200312", "0.51936644", "0.51801187", "0.51740825", "0.51138...
0.7350024
0
Map MXNet's exp operator attributes to onnx's Exp operator and return the created node.
def convert_exp(node, **kwargs): return create_basic_op_node('Exp', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exp(self):\n return type(self)(self.parent(), self._simplify(self._express.exp()))", "def exp(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.exp())", "def expr(self):\n return self._express", "def expon(*args,...
[ "0.63551813", "0.6045158", "0.5848142", "0.58281314", "0.5785661", "0.5784021", "0.5707887", "0.56942314", "0.5616022", "0.5590413", "0.5572627", "0.5540329", "0.5512824", "0.5411621", "0.53919", "0.5374348", "0.535856", "0.5315022", "0.5296157", "0.5277749", "0.52776116", ...
0.7500115
0
Map MXNet's _copy operator attributes to onnx's Identity operator and return the created node.
def convert_copy(node, **kwargs): return create_basic_op_node('Identity', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n cp.tag = copy(self.tag)\r\n return cp", "def copy_as_new(self) -> \"Individual\":\n return Individual(self.main_node.copy(), to_pipeline=self._to_pipeline)", "def __call__...
[ "0.6040742", "0.59664553", "0.59002656", "0.58588904", "0.5807119", "0.5798085", "0.572463", "0.57207906", "0.56919736", "0.56849563", "0.567741", "0.5644937", "0.55868983", "0.55787975", "0.5568176", "0.55487376", "0.5499865", "0.5467768", "0.5450066", "0.5446349", "0.543404...
0.7632108
0
Map MXNet's identity operator attributes to onnx's ConstantFill operator and return the created node.
def convert_identity(node, **kwargs): return create_basic_op_node('ConstantFill', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_copy(node, **kwargs):\n return create_basic_op_node('Identity', node, kwargs)", "def _create_constantOfShape(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n tensor_type = onnx.TensorProto.FLOAT if isinstance(\n op.value, float) else onnx.TensorPr...
[ "0.5793247", "0.55757725", "0.53495646", "0.5342002", "0.5303849", "0.5282525", "0.52815294", "0.5273177", "0.5256617", "0.5214077", "0.5213285", "0.5184088", "0.51712525", "0.51261264", "0.511096", "0.50941354", "0.5083022", "0.50758964", "0.5066469", "0.50372505", "0.502145...
0.76081395
0
Map MXNet's InstanceNorm operator attributes to onnx's InstanceNormalization operator based on the input node's attributes and return the created node.
def convert_instancenorm(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) eps = float(attrs.get("eps", 0.001)) node = onnx.helper.make_node( 'InstanceNormalization', inputs=input_nodes, outputs=[name], name=name, epsilon=eps) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_instance_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n gamma = g.get_node(op.input(\"Scale\")[0])\n beta = g.get_node(op.input(\"Bias\")[0])\n epsilon = op.attr(\"epsilon\")\n\n scale = center = True\n out = _op.nn.instance_norm(x, gamma, beta, axis=1, epsilon=epsilon, ...
[ "0.68201405", "0.6210069", "0.6158336", "0.5857216", "0.5686284", "0.56264687", "0.55710334", "0.55510676", "0.54553306", "0.5442757", "0.54066503", "0.53916603", "0.53645897", "0.536249", "0.5269938", "0.5257382", "0.52224195", "0.51939166", "0.5191482", "0.51271725", "0.507...
0.74606353
0
Map MXNet's LeakyReLU operator attributes to onnx's Elu/LeakyRelu/PRelu operators based on the input node's attributes and return the created node.
def convert_leakyrelu(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) initializer = kwargs["initializer"] act_type = attrs.get("act_type", "leaky") alpha = float(attrs.get("slope", 0.25)) act_name = {"elu": "Elu", "leaky": "LeakyRelu", "prelu": "PRelu", "selu": "Selu"} reshape_val_name = 'reshape' + str(kwargs["idx"]) input_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')] reshape_value = np.array([1, -1, 1, 1], dtype='int64') dims = np.shape(reshape_value) shape_node = onnx.helper.make_tensor_value_info(reshape_val_name, input_type, dims) initializer.append( onnx.helper.make_tensor( name=reshape_val_name, data_type=input_type, dims=dims, vals=reshape_value, raw=False, ) ) slope_op_name = 'slope' + str(kwargs["idx"]) lr_node = [] if act_type == "prelu" or act_type == "selu": reshape_slope_node = onnx.helper.make_node( 'Reshape', inputs=[input_nodes[1], reshape_val_name], outputs=[slope_op_name], name=slope_op_name ) node = onnx.helper.make_node( act_name[act_type], inputs=[input_nodes[0], slope_op_name], outputs=[name], name=name) lr_node.append(shape_node) lr_node.append(reshape_slope_node) lr_node.append(node) else: node = onnx.helper.make_node( act_name[act_type], inputs=input_nodes, outputs=[name], name=name, alpha=alpha) lr_node.append(node) return lr_node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_leakyrelu(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 0.01)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha)", "def convert_relu...
[ "0.6645275", "0.64907336", "0.64269954", "0.6418857", "0.6116449", "0.59967774", "0.5878855", "0.5858891", "0.5726611", "0.567721", "0.5667963", "0.56440437", "0.55542094", "0.5381494", "0.5375752", "0.5368928", "0.535867", "0.5342397", "0.5317905", "0.5286128", "0.5262697", ...
0.67764103
0
Map MXNet's softmax operator attributes to onnx's Softmax operator and return the created node.
def convert_softmax(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) axis = int(attrs.get("axis", -1)) c_softmax_node = [] axis = -1 transpose_node1 = onnx.helper.make_node( "Transpose", inputs=input_nodes, perm=(0, 2, 3, 1), # NCHW--NHWC--(NHW,C) name=name + '_tr1', outputs=[name + '_tr1'] ) softmax_node = onnx.helper.make_node( "Softmax", inputs=[name + '_tr1'], axis=axis, name=name + '', outputs=[name + ''] ) transpose_node2 = onnx.helper.make_node( "Transpose", inputs=[name + ''], perm=(0, 3, 1, 2), # NHWC--NCHW name=name + '_tr2', outputs=[name + '_tr2'] ) c_softmax_node.append(transpose_node1) c_softmax_node.append(softmax_node) c_softmax_node.append(transpose_node2) return c_softmax_node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def _create_softmax(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.g...
[ "0.7634255", "0.681238", "0.6320847", "0.6268783", "0.6250723", "0.58051175", "0.57759976", "0.5714223", "0.5651559", "0.5626642", "0.5621024", "0.56144696", "0.5528945", "0.54495025", "0.54494274", "0.54198587", "0.54136854", "0.5412554", "0.54050344", "0.5390913", "0.538304...
0.6541797
2
Map MXNet's Concat operator attributes to onnx's Concat operator and return the created node.
def convert_concat(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) axis = int(attrs.get("dim", 1)) concat_node = onnx.helper.make_node( "Concat", input_nodes, [name], axis=axis, name=name ) return [concat_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def create_helper_concat_node(inputs, output_name, axis=0):\n concat_node = onnx.helper...
[ "0.7497081", "0.6968671", "0.6428399", "0.62781763", "0.6080984", "0.59952056", "0.5681301", "0.5657255", "0.5648929", "0.56483614", "0.5587412", "0.5580695", "0.5526628", "0.5516698", "0.54851794", "0.5478145", "0.54658675", "0.5455962", "0.5449425", "0.5396655", "0.53957415...
0.6945871
2
Map MXNet's RNN operator attributes to onnx's RNN operator and return the created node.
def convert_RNN(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) nodes = [] # ============================== Attributes ============================== mode = attrs['mode'].upper() rnn_kwargs = {} if mode != 'LSTM': raise NotImplementedError( "Only LSTM mode RNN conversion to ONNX is currently supported." ) hidden_size = rnn_kwargs['hidden_size'] = int(attrs.get("state_size")) if eval(attrs.get('bidirectional', 'False')): rnn_kwargs['direction'] = 'bidirectional' num_directions = 2 else: rnn_kwargs['direction'] = 'forward' num_directions = 1 clip_min = eval(attrs.get('lstm_state_clip_min', 'None')) clip_max = eval(attrs.get('lstm_state_clip_max', 'None')) if clip_min is not None or clip_max is not None: # ONNX LSTMs have the `clip` attribute, however it seems to give # slightly different results, when compared to the MXNet equivalent raise NotImplementedError( "Conversion of RNNs with lstm_state_clip_min/max " "to ONNX is currently not supported." ) if eval(attrs.get('lstm_state_clip_nan', 'False')): raise NotImplementedError( "ONNX RNN operator doesn't support lstm_state_clip_nan" ) if eval(attrs.get('use_sequence_length', 'False')): # This can maybe be implemented using the `sequence_len` optional input raise NotImplementedError( "Conversion of RNNs with variable input sequence length " "to ONNX is currently not supported." ) if eval(attrs.get('num_layers', '1')) != 1: raise NotImplementedError( "Conversion of RNNs with num_layers > 1 " "to ONNX is currently not supported." ) if eval(attrs.get('p', '0')) != 0: # WARNING! The `p` attribute in mxnet is "dropout probability" while # the `p` optional input of ONNX LSTMs is the peephole weights tensor. raise NotImplementedError( "Conversion of RNNs with dropout " "to ONNX is currently not supported." ) if eval(attrs.get('projection_size', 'None')) is not None: raise NotImplementedError( "Conversion of RNNs with custom projection_size " "to ONNX is currently not supported." 
) if not eval(attrs.get('state_outputs', 'True')): raise NotImplementedError( "Conversion of RNNs with state_outputs=False " "to ONNX is currently not supported." ) # ============================== Parameters ============================== # (See _rnn_param_concat for part 1 of this comment section) # Unfortunately, mxnets version of _rnn_param_concat concatenates *ALL* # the parameters, instead of grouping them like ONNX. The workaround, # used here, is that the _rnn_param_concat node conversion code will # produce multiple nodes with names ending in rnn_param_concatN__P # (Where P is the parameter group name W, R or B) # We then use regular expressions to get the "extra outputs" of the # _rnn_param_concat node. x, param_concat, *initial_states = input_nodes param_pattern = re.compile(r'(.*rnn_param_concat[0-9]+__)[WRB]$') if not param_pattern.match(param_concat): # ToDo: Maybe do something more sane after Issue #17621 gets resolved raise NotImplementedError( "The order of RNN parameters is different between mxnet and ONNX. " "Currently, an automatic conversion is only possible, if the RNN " "parameters were concatenated using the internal " "_rnn_param_concat operator." 
) w, r, b = ( param_pattern.sub(r'\1' + param, param_concat) for param in 'WRB' ) # The second conversion step handles # * parameter shapes, since mxnet uses flattened parameters, while # ONNX requires specific tensor shapes # * gate order, since both frameworks require the weights and biases # of the 4 basic gates (forget, input, cell and output) to be # concatenated, but in different order # ([ifco] for mxnet and [iofc] for ONNX) def fix_rnn_parameter(p, p_shape_in, p_shape_out, p_order=(0, 3, 1, 2)): p_ = p # 1) Reshape flat parameters to their original shape, such that # the gates are concatenated along axis=1 p_reshaped_in = create_helper_reshape_node( p, p_ + "__reshaped_in", p_shape_in, kwargs ) nodes.extend(p_reshaped_in) p = p_reshaped_in[-1].name # 2) Use a Gather node to pick gates along axis=1, permuting them p_reordered = create_helper_gather_node( p, p_ + "__reordered", p_order, kwargs, axis=1 ) nodes.extend(p_reordered) p = p_reordered[-1].name # 3) Reshape the parameters to their final shape, squeezing the gate # and hidden dimensions together p_reshaped_out = create_helper_reshape_node( p, p_ + "__reshaped_out", p_shape_out, kwargs ) nodes.extend(p_reshaped_out) return p_reshaped_out[-1].name w = fix_rnn_parameter( w, p_shape_in=(num_directions, 4, hidden_size, -1), p_shape_out=(num_directions, 4 * hidden_size, -1), ) r = fix_rnn_parameter( r, p_shape_in=(num_directions, 4, hidden_size, hidden_size), p_shape_out=(num_directions, 4 * hidden_size, hidden_size), ) b = fix_rnn_parameter( b, p_shape_in=(2 * num_directions, 4, hidden_size), p_shape_out=(num_directions, 8 * hidden_size), ) # ============================= Inputs/States ============================ input_shape = create_helper_shape_node(x, x + "__shape") nodes.extend(input_shape) input_shape = input_shape[-1].name batch_size = create_helper_gather_node( input_shape, x + "__batch_size", indices=[1], axis=0, kwargs=kwargs, ) nodes.extend(batch_size) batch_size = batch_size[-1].name 
state_shape = create_helper_build_values_node( [num_directions, batch_size, hidden_size], name + "__state_shape", dtype=np.int64, kwargs=kwargs, ) nodes.extend(state_shape) state_shape = state_shape[-1].name expanded_states = [] for state in initial_states: expanded_state = create_helper_expand_node( state, state + "__expanded", state_shape ) nodes.extend(expanded_state) expanded_states.append(expanded_state[-1].name) initial_states = expanded_states # =========================== RNN node/outputs =========================== y_out = [onnx.helper.make_node( mode, # RNN or LSTM or GRU inputs=[x, w, r, b, '', *initial_states], outputs=[name + '__Y'], name=name + '__Y', **rnn_kwargs )] nodes.extend(y_out) y = y_out[-1].name # We are almost done. The only thing left to do is to convert the output # of the RNN node from the [S, D, B, H] layout, which ONNX returns # to the [S, B, D*H] layout, which mxnet uses # 1) Transpose [S, D, B, H] -> [S, B, D, H] y_perm = (0, 2, 1, 3) y_transposed = create_helper_trans_node( y, y + "__transposed", y_perm ) nodes.extend(y_transposed) y = y_transposed[-1].name # 2) Reshape [S, B, D, H] -> [S, B, D*H] y_shape = (0, 0, -1) y_reshaped = create_helper_reshape_node(y, name, y_shape, kwargs) nodes.extend(y_reshaped) return nodes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helpe...
[ "0.573704", "0.5696308", "0.5613529", "0.56037074", "0.55654407", "0.5430018", "0.54215723", "0.5411738", "0.540513", "0.5397167", "0.5390512", "0.5343584", "0.53244823", "0.53124344", "0.5312318", "0.5296085", "0.5295705", "0.52775544", "0.5250277", "0.52421296", "0.52008814...
0.6088374
0
Map MXNet's _rnn_param_concat operator attributes to onnx's Concat operator and return the created node.
def convert_rnn_param_concat(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) axis = int(attrs.get("dim")) # mxnet RNN node and ONNX RNN/LSTM/GRU nodes # use different ways to store their parameters # The conversion between these formats is broken into 2 steps # The first step (performed here in _rnn_param_concat) regroups the # flattened parameters according to the table below. # The second step corrects the shapes and orders of gates and is # performed and described in more detail in the RNN node # mxnet [ONNX] -> ONNX (group) # i2h_weights [W (+ WB)] -> W (input weights) # h2h_weights [R (+ RB)] -> R (recurrence weights) # i2h_biases [Wb (+ WBb)] -> B = [Wb + Rb (+ WBb + RBb)] # h2h_biases [Rb (+ RBb)] -> (biases) split = len(input_nodes) // 2 weights, biases = input_nodes[:split], input_nodes[split:] i2h_weights = weights[::2] h2h_weights = weights[1::2] i2h_biases = biases[::2] h2h_biases = biases[1::2] reordered_biases = [ bias for pair in zip(i2h_biases, h2h_biases) for bias in pair ] # The order of mxnet parameters in the inputs is: # [ # '{}{}_{}_{}'.format(d, l, g, t) # for t in ['weight', 'bias'] # for l in range(num_layers) # for d in ['l', 'r'][:num_directions] # for g in ['i2h', 'h2h'] # ] w = onnx.helper.make_node( "Concat", inputs=i2h_weights, outputs=[name + "__W"], axis=axis, name=name + "__W" ) r = onnx.helper.make_node( "Concat", inputs=h2h_weights, outputs=[name + "__R"], axis=axis, name=name + "__R" ) b = onnx.helper.make_node( "Concat", inputs=reordered_biases, outputs=[name + "__B"], axis=axis, name=name + "__B" ) return [w, r, b]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def convert_concat(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwarg...
[ "0.73427945", "0.7049503", "0.67380154", "0.6468759", "0.6443857", "0.60110635", "0.56656677", "0.5663238", "0.5535425", "0.5531253", "0.54857755", "0.5458668", "0.537242", "0.531605", "0.5313958", "0.5275782", "0.5205251", "0.5205095", "0.51890177", "0.5185862", "0.5184087",...
0.7519251
0
Map MXNet's _zeros, _ones and _full operators attributes to onnx's tensors and return the created node.
def convert_full(node, **kwargs): # ToDo: Use Constant or ConstantOfShape, when Issue #15101 is resolved? name, input_nodes, attrs = get_inputs(node, kwargs) del input_nodes # Convert "0"s dimensions to "1"s. This is a workaround for the case, where # mxnet symbols can broadcast "0"s, while ONNX can only broadcast over "1"s shape = convert_string_to_list(attrs["shape"]) shape = tuple(dim if dim else 1 for dim in shape) value = { '_zeros': 0.0, '_ones': 1.0, '_full': eval(attrs.get('value', '0')), }[node['op']] dtype = attrs.get('dtype') data = np.full(shape, value, dtype) return create_helper_tensor_node(data, name, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_te...
[ "0.68711054", "0.6554576", "0.6425993", "0.6287211", "0.6215032", "0.6175954", "0.6146256", "0.6092229", "0.60895646", "0.60729265", "0.5951745", "0.58593535", "0.5829599", "0.5824289", "0.5799787", "0.5786587", "0.57831514", "0.5759402", "0.5740158", "0.57297534", "0.5698125...
0.5842791
12
Map MXNet's transpose operator attributes to onnx's Transpose operator and return the created node.
def convert_transpose(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) axes = attrs.get("axes", ()) if axes: axes = tuple(map(int, re.findall(r'\d+', axes))) transpose_node = onnx.helper.make_node( "Transpose", input_nodes, [name], perm=axes, name=name ) else: transpose_node = onnx.helper.make_node( "Transpose", input_nodes, [name], name=name ) return [transpose_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_transpose(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('perm', op.perm),\n ])\n return node", "def _create_transpose(cls, onnx_node, inputs, opset_version):\n shape = inputs[0...
[ "0.78956044", "0.7164064", "0.69711393", "0.69439864", "0.6919916", "0.6686743", "0.6489473", "0.63730687", "0.6343207", "0.61527866", "0.61493826", "0.6142635", "0.6139026", "0.6132935", "0.60538316", "0.60287285", "0.5990234", "0.59900707", "0.59613836", "0.59398377", "0.59...
0.74619085
1
Map MXNet's LRN operator attributes to onnx's LRN operator and return the created node.
def convert_lrn(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) alpha = float(attrs.get("alpha", 0.0001)) beta = float(attrs.get("beta", 0.75)) bias = float(attrs.get("knorm", 1.0)) size = int(attrs.get("nsize")) lrn_node = onnx.helper.make_node( "LRN", inputs=input_nodes, outputs=[name], name=name, alpha=alpha, beta=beta, bias=bias, size=size ) return [lrn_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calcLnLFromNode(self, nd):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return TreeLikelihoodBase.calcLnLFromNode(self, nd)", "def convert_attributes(cls, attrs):\n if attrs.get_int(\"axis\") != 1:\n raise RuntimeError(\n f\"Unsuppo...
[ "0.5615053", "0.5551711", "0.5397873", "0.5347614", "0.53341866", "0.5257164", "0.5226096", "0.5202186", "0.5176221", "0.5120736", "0.5096025", "0.5051921", "0.50324756", "0.5027518", "0.50227654", "0.49056938", "0.48989987", "0.48988622", "0.4888863", "0.4885868", "0.4884632...
0.6714993
0
Map MXNet's L2Normalization operator attributes to onnx's LpNormalization operator and return the created node.
def convert_l2normalization(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) mode = attrs.get("mode", "instance") if mode != "channel": raise AttributeError("L2Normalization: ONNX currently supports channel mode only") l2norm_node = onnx.helper.make_node( "LpNormalization", input_nodes, [name], axis=1, # channel only name=name ) return [l2norm_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n axis_l = [axis]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.l2_normalize(x, epsilon, axis_l)\n g.add_node(op.output(\"Out\")[0], out)", "def convert_norm(node, **kwargs):\n name, input_nod...
[ "0.59415656", "0.5882721", "0.58236057", "0.5708504", "0.5625045", "0.54728884", "0.54538465", "0.53243077", "0.5301751", "0.52801716", "0.5251732", "0.51723593", "0.5098865", "0.50875914", "0.5062995", "0.5036322", "0.5006597", "0.4991197", "0.49699366", "0.49565876", "0.491...
0.71821564
0
Map MXNet's Dropout operator attributes to onnx's Dropout operator and return the created node.
def convert_dropout(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) probability = float(attrs.get("p", 0.5)) probability = np.array(probability, dtype=np.float32) training_mode = False training_mode = np.array(training_mode, dtype=np.bool) node_ratio = create_helper_tensor_node(probability, name + '_ratio', kwargs) node_ratio = create_helper_tensor_node(training_mode, name + '_mode', kwargs) dropout_node = onnx.helper.make_node( "Dropout", [input_nodes[0], name + '_ratio', name + '_mode'], [name], name=name ) return [dropout_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_dropout(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('ratio', op.ratio),\n ])\n return node", "def _create_dropout(cls, onnx_node, inputs, opset_version):\n ratio = onnx_node....
[ "0.7353928", "0.65540075", "0.6395461", "0.6285999", "0.5709964", "0.55830383", "0.55629826", "0.55001", "0.5478698", "0.5474093", "0.546457", "0.53771776", "0.53521633", "0.5260273", "0.5242218", "0.51851434", "0.5174336", "0.5164886", "0.5147427", "0.51048636", "0.5103446",...
0.7049092
1
Map MXNet's Flatten operator attributes to onnx's Flatten operator and return the created node.
def convert_flatten(node, **kwargs): return create_basic_op_node('Flatten', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_flatten(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def _create_flatten(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.g...
[ "0.70053905", "0.6535054", "0.6155151", "0.61142373", "0.6006348", "0.5795786", "0.5709457", "0.56205976", "0.5437379", "0.54225713", "0.5404203", "0.5383438", "0.5313076", "0.5282197", "0.5249121", "0.5248736", "0.522337", "0.5183563", "0.51568246", "0.51036185", "0.51030135...
0.68925685
1
Map MXNet's Clip operator attributes to onnx's Clip operator and return the created node.
def convert_clip(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) a_min = np.float(attrs.get('a_min', -np.inf)) a_max = np.float(attrs.get('a_max', np.inf)) clip_node = onnx.helper.make_node( "Clip", input_nodes, [name], name=name, min=a_min, max=a_max ) return [clip_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_clip(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n if op.min is not None:\n node.input.append(op.name + \":min\")\n else:\n node.input.append(\"\")\n if op.max is not None:\n node.input.append(op.name + \":max\")\...
[ "0.64989394", "0.5577588", "0.5251773", "0.5233911", "0.50637215", "0.50445", "0.5005481", "0.4997189", "0.48964328", "0.4827775", "0.48206207", "0.48055732", "0.47918075", "0.47716203", "0.47590274", "0.47138596", "0.46755806", "0.4661917", "0.4658839", "0.4649198", "0.46434...
0.6301358
1
Helper function for scalar arithmetic operations
def scalar_op_helper(node, op_name, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) from onnx import numpy_helper input_type = kwargs["in_type"] scalar_value = np.array([attrs.get("scalar", 1)], dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type]) initializer = kwargs["initializer"] flag = True # If the input value is in initializer, just multiply with scalar input # and create a new initializer for i in initializer: if i.name == input_nodes[0]: if op_name == 'Mul': new_initializer = numpy_helper.to_array(i) * scalar_value[0] elif op_name == 'Sub': if name.startswith("_rminusscalar"): new_initializer = scalar_value[0] - numpy_helper.to_array(i) else: new_initializer = numpy_helper.to_array(i) - scalar_value[0] elif op_name == 'Add': new_initializer = numpy_helper.to_array(i) + scalar_value[0] elif op_name == 'Div': if name.startswith("_rdivscalar"): new_initializer = scalar_value[0] / numpy_helper.to_array(i) else: new_initializer = numpy_helper.to_array(i) / scalar_value[0] elif op_name == 'Pow': new_initializer = numpy_helper.to_array(i) ** scalar_value[0] flag = False break # else create a new tensor of the scalar value, add it in initializer if flag is True: dims = np.shape(scalar_value) scalar_op_name = "scalar_op" + str(kwargs["idx"]) tensor_node = onnx.helper.make_tensor_value_info(scalar_op_name, input_type, dims) initializer.append( onnx.helper.make_tensor( name=scalar_op_name, data_type=input_type, dims=dims, vals=scalar_value, raw=False, ) ) mul_node = onnx.helper.make_node( op_name, [input_nodes[0], scalar_op_name], [name], name=name ) return [tensor_node, mul_node] else: data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[new_initializer.dtype] dims = np.shape(new_initializer) new_a_node = input_nodes[0] + str(kwargs["idx"]) tensor_node = onnx.helper.make_tensor_value_info(new_a_node, data_type, dims) initializer.append( onnx.helper.make_tensor( name=new_a_node, data_type=data_type, dims=dims, vals=new_initializer, raw=False, ) ) return 
[tensor_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __rmul__(self, _scalar):\n\t\treturn self * _scalar", "def scalar_function(x, y):\n if x <= y:\n return x*y\n else:\n return x/y", "def convert_add_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Add', **kwargs)", "def __truediv__(self, scalar):\n return self.div(sc...
[ "0.6927999", "0.683415", "0.6819326", "0.6775894", "0.67277527", "0.6601178", "0.6585634", "0.6537345", "0.6529052", "0.65214366", "0.65030617", "0.64891243", "0.64875793", "0.6379192", "0.63672125", "0.6356587", "0.63362074", "0.63251966", "0.63225913", "0.62615013", "0.6252...
0.6782312
3
Map MXNet's _mul_scalar operator attributes to onnx's Mul operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes.
def convert_mul_scalar(node, **kwargs): return scalar_op_helper(node, 'Mul', **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)", "def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)", "def tree_add_scalar_mul(tree_x, scalar, tree_y):\n return tree_multimap(lambda x, y: x + scalar * y, tree_x, tr...
[ "0.79224896", "0.7241136", "0.6973371", "0.6796328", "0.6784119", "0.6776615", "0.67680424", "0.6747651", "0.67148656", "0.6702787", "0.66282004", "0.6625424", "0.6572571", "0.65414375", "0.652442", "0.6481082", "0.63871235", "0.63544697", "0.63508964", "0.6326876", "0.632687...
0.8387702
0
Map MXNet's _minus_scalar operator attributes to onnx's Minus operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes.
def convert_minus_scalar(node, **kwargs): return scalar_op_helper(node, 'Sub', **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_rminus_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Sub', **kwargs)", "def convert_minimum(node, **kwargs):\n return create_basic_op_node('Min', node, kwargs)", "def _create_clip(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n if op.min is ...
[ "0.6305766", "0.6050129", "0.60270244", "0.6017436", "0.597134", "0.58471966", "0.56328523", "0.5549238", "0.553047", "0.5523445", "0.55042356", "0.54917306", "0.5459456", "0.5451665", "0.5436429", "0.5424267", "0.53898084", "0.5378877", "0.53705645", "0.5368082", "0.53500354...
0.6904257
0
Map MXNet's _rminus_scalar operator attributes to onnx's Sub operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes.
def convert_rminus_scalar(node, **kwargs): return scalar_op_helper(node, 'Sub', **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_minus_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Sub', **kwargs)", "def convert_elementwise_sub(node, **kwargs):\n return create_basic_op_node('Sub', node, kwargs)", "def __sub__(self, other):\r\n if isinstance(other, Node):\r\n new_node = sub_op(self, other)\r...
[ "0.79085666", "0.68994915", "0.6750096", "0.65244937", "0.61306584", "0.61295164", "0.61034423", "0.6103109", "0.6042899", "0.6030476", "0.6012048", "0.60119057", "0.5996009", "0.5993797", "0.5971871", "0.59533864", "0.5938247", "0.588912", "0.58856267", "0.57691115", "0.5764...
0.8166036
0
Map MXNet's _plus_scalar operator attributes to onnx's Add operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes.
def convert_add_scalar(node, **kwargs): return scalar_op_helper(node, 'Add', **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_elementwise_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)", "def convert_addn(node, **kwargs):\n return create_basic_op_node('Sum', node, kwargs)", "def covert_broadcast_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)", "def __add__(self,...
[ "0.767562", "0.72351784", "0.69635", "0.6916363", "0.69029146", "0.6712746", "0.65971774", "0.6551346", "0.6480433", "0.6455669", "0.64297163", "0.64250094", "0.64143014", "0.63674563", "0.6350933", "0.63499177", "0.63171613", "0.6313307", "0.62899745", "0.6258752", "0.625835...
0.80610985
0
Map MXNet's _div_scalar operator attributes to onnx's Div operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes.
def convert_div_scalar(node, **kwargs): return scalar_op_helper(node, 'Div', **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_elemwise_div(node, **kwargs):\n return create_basic_op_node('Div', node, kwargs)", "def convert_rdiv_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)", "def convert_broadcast_div(node, **kwargs):\n return create_basic_op_node('Div', node, kwargs)", "def __div__(se...
[ "0.7483501", "0.69537693", "0.6816418", "0.6603985", "0.6457433", "0.63764226", "0.635181", "0.6023572", "0.5944247", "0.59403896", "0.5891626", "0.5882687", "0.5876528", "0.58643323", "0.5856814", "0.5833514", "0.5809807", "0.5758597", "0.55864036", "0.5551182", "0.5471876",...
0.8018971
0
Map MXNet's _rdiv_scalar operator attributes to onnx's Div operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes.
def convert_rdiv_scalar(node, **kwargs): return scalar_op_helper(node, 'Div', **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_div_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)", "def convert_elemwise_div(node, **kwargs):\n return create_basic_op_node('Div', node, kwargs)", "def __rdiv__(self, _scalar):\n\t\treturn self / _scalar", "def convert_broadcast_div(node, **kwargs):\n return c...
[ "0.7794921", "0.73854715", "0.6798597", "0.67867917", "0.6434399", "0.63413435", "0.6173485", "0.6116096", "0.6076342", "0.5998175", "0.5978865", "0.592204", "0.59028167", "0.5873986", "0.587386", "0.57993555", "0.575969", "0.5733526", "0.5695951", "0.56446165", "0.5617565", ...
0.82317907
0
Map MXNet's _pow_scalar operator attributes to onnx's Pow operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes.
def convert_pow_scalar(node, **kwargs): return scalar_op_helper(node, 'Pow', **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)", "def convert_pow(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = block.var(op.output(\"Out\")[0]).dtype\n dtype = _convert_dtype_value(dtype)\n factor = op.attr(\"factor\")\n factor = _expr.c...
[ "0.73938245", "0.71553993", "0.69053197", "0.6618425", "0.6509602", "0.64128435", "0.6384842", "0.6307531", "0.62721306", "0.6149648", "0.6126736", "0.60709953", "0.60470843", "0.5954794", "0.59512776", "0.59302795", "0.5929934", "0.58751506", "0.58481187", "0.58359647", "0.5...
0.821569
0
Map MXNet's argmax operator attributes to onnx's ArgMax operator and return the created node.
def convert_argmax(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) axis = int(attrs.get("axis")) keepdims = get_boolean_attribute_value(attrs, "keepdims") node = onnx.helper.make_node( 'ArgMax', inputs=input_nodes, axis=axis, keepdims=keepdims, outputs=[name], name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_maximum(node, **kwargs):\n return create_basic_op_node('Max', node, kwargs)", "def convert_max(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n ...
[ "0.70940566", "0.6874972", "0.61902833", "0.61549073", "0.61046344", "0.60914123", "0.6080087", "0.5927143", "0.5927143", "0.5880221", "0.58306336", "0.57700604", "0.57442945", "0.57169616", "0.56744826", "0.5666791", "0.56665546", "0.5664926", "0.5634875", "0.5621766", "0.55...
0.7944788
0
Map MXNet's argmin operator attributes to onnx's ArgMin operator and return the created node.
def convert_argmin(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) axis = int(attrs.get("axis")) keepdims = get_boolean_attribute_value(attrs, "keepdims") node = onnx.helper.make_node( 'ArgMin', inputs=input_nodes, axis=axis, keepdims=keepdims, outputs=[name], name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_minimum(node, **kwargs):\n return create_basic_op_node('Min', node, kwargs)", "def convert_min(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n ...
[ "0.7161309", "0.7048762", "0.653032", "0.6511046", "0.61759573", "0.6156072", "0.61389035", "0.60431165", "0.60163444", "0.5919297", "0.58403546", "0.5791767", "0.57676524", "0.5744902", "0.5734494", "0.57265365", "0.5693712", "0.56826967", "0.5673407", "0.5659327", "0.565932...
0.7900277
0
Map MXNet's _maximum operator attributes to onnx's Max operator and return the created node.
def convert_maximum(node, **kwargs): return create_basic_op_node('Max', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_max(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes is not None:\n ...
[ "0.70796645", "0.6651025", "0.64678437", "0.6413021", "0.6399445", "0.63507855", "0.63063204", "0.6268924", "0.624761", "0.62005275", "0.6182359", "0.6182359", "0.6120072", "0.6118509", "0.61165994", "0.6114771", "0.60751885", "0.6062354", "0.60444164", "0.60346764", "0.60116...
0.79425055
0
Map MXNet's _minimum operator attributes to onnx's Min operator and return the created node.
def convert_minimum(node, **kwargs): return create_basic_op_node('Min', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_min(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes is not None:\n ...
[ "0.7165784", "0.66267216", "0.65062666", "0.6468117", "0.6441443", "0.643317", "0.63803333", "0.6362957", "0.6308952", "0.628914", "0.628914", "0.6285821", "0.62643987", "0.6242156", "0.62365615", "0.6184856", "0.618154", "0.61484843", "0.6128366", "0.61151135", "0.61137015",...
0.79770553
0
Map MXNet's min operator attributes to onnx's ReduceMin operator and return the created node.
def convert_min(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) mx_axis = attrs.get("axis", None) axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None keepdims = get_boolean_attribute_value(attrs, "keepdims") if axes is not None: node = onnx.helper.make_node( 'ReduceMin', inputs=input_nodes, outputs=[name], axes=axes, keepdims=keepdims, name=name ) return [node] else: node = onnx.helper.make_node( 'ReduceMin', inputs=input_nodes, outputs=[name], keepdims=keepdims, name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_minimum(node, **kwargs):\n return create_basic_op_node('Min', node, kwargs)", "def min(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.min, reduce_instance_dims, name)", "def produce_min(self, meta, raven_vars, dispatch, t):\n if...
[ "0.74088675", "0.6358284", "0.61328006", "0.61307657", "0.5984475", "0.5984475", "0.59366184", "0.59366184", "0.59366184", "0.586159", "0.58153784", "0.58141047", "0.5771208", "0.5747477", "0.5725165", "0.57177365", "0.56871665", "0.5682544", "0.5676312", "0.5637121", "0.5634...
0.76065016
0
Map MXNet's max operator attributes to onnx's ReduceMax operator and return the created node.
def convert_max(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) mx_axis = attrs.get("axis", None) axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None keepdims = get_boolean_attribute_value(attrs, "keepdims") if axes is not None: node = onnx.helper.make_node( 'ReduceMax', inputs=input_nodes, outputs=[name], axes=axes, keepdims=keepdims, name=name ) return [node] else: node = onnx.helper.make_node( 'ReduceMax', inputs=input_nodes, outputs=[name], keepdims=keepdims, name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_maximum(node, **kwargs):\n return create_basic_op_node('Max', node, kwargs)", "def max(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.max, reduce_instance_dims, name)", "def convert_argmax(node, **kwargs):\n name, input_nodes, a...
[ "0.7659286", "0.6377665", "0.6145082", "0.6139816", "0.6064376", "0.6055874", "0.6041896", "0.6037136", "0.5989364", "0.5909807", "0.5900593", "0.5855207", "0.58143365", "0.5796772", "0.57937384", "0.57896626", "0.5788211", "0.578123", "0.5748129", "0.57333666", "0.57123965",...
0.7726849
0
Map MXNet's mean operator attributes to onnx's ReduceMean operator and return the created node.
def convert_mean(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) mx_axis = attrs.get("axis", None) axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None keepdims = get_boolean_attribute_value(attrs, "keepdims") if axes is not None: node = onnx.helper.make_node( 'ReduceMean', inputs=input_nodes, outputs=[name], axes=axes, keepdims=keepdims, name=name ) return [node] else: node = onnx.helper.make_node( 'ReduceMean', inputs=input_nodes, outputs=[name], keepdims=keepdims, name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reduce_mean_02():\n\n class ReduceMeanTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"ReduceMean\", inputs=[\"v0\"], outputs=[\"v1\"], axes=[1, 2])\n inputs = [info(\"v0\", TensorProto.FLOAT, (1, 3, 4, 5))]\n outputs = [info(\"v1\",...
[ "0.6283045", "0.6170426", "0.61373425", "0.58561695", "0.5818263", "0.5725782", "0.5599807", "0.5562414", "0.5543096", "0.5514586", "0.5514586", "0.5514586", "0.5514586", "0.5514586", "0.54903483", "0.5438709", "0.5438554", "0.53819174", "0.53819174", "0.5381861", "0.5380131"...
0.7579308
0
Map MXNet's prod operator attributes to onnx's ReduceProd operator and return the created node.
def convert_prod(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) mx_axis = attrs.get("axis", None) axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None keepdims = get_boolean_attribute_value(attrs, "keepdims") if axes is not None: node = onnx.helper.make_node( 'ReduceProd', inputs=input_nodes, outputs=[name], axes=axes, keepdims=keepdims, name=name ) return [node] else: node = onnx.helper.make_node( 'ReduceProd', inputs=input_nodes, outputs=[name], keepdims=keepdims, name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node", "def _create_reduceOp(...
[ "0.62867284", "0.5835511", "0.57357043", "0.5722716", "0.5617108", "0.5607751", "0.55567515", "0.5480656", "0.54691935", "0.543317", "0.54207885", "0.5408181", "0.53855884", "0.52865595", "0.5275297", "0.5250045", "0.5214458", "0.52013505", "0.5191116", "0.51694757", "0.51609...
0.75327003
0
Map MXNet's elemwise_add operator attributes to onnx's Add operator and return the created node.
def convert_elementwise_add(node, **kwargs): return create_basic_op_node('Add', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def covert_broadcast_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)", "def __add__(self, other):\r\n if isinstance(other, Node):\r\n new_node = add_op(self, other)\r\n else:\r\n # Add by a constant stores the constant in the new node's const_attr fi...
[ "0.7189428", "0.655496", "0.6537227", "0.6472508", "0.6449052", "0.64310604", "0.6380455", "0.62438035", "0.6235679", "0.6205862", "0.6189562", "0.6052324", "0.6052324", "0.60426134", "0.60199106", "0.60036284", "0.5988608", "0.59885174", "0.5946734", "0.5942936", "0.5912339"...
0.79225135
0
Map MXNet's broadcast_add operator attributes to onnx's Add operator and return the created node.
def covert_broadcast_add(node, **kwargs): return create_basic_op_node('Add', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_elementwise_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)", "def _add_node(self, node_entry, idx):\n if node_entry[\"op\"].name not in relay_to_onnx_op_mapping:\n raise NotImplementedError(\n f\"Currently the operator '{node_entry['op'].na...
[ "0.6780478", "0.63417214", "0.60001606", "0.58787817", "0.5827999", "0.5784491", "0.5780997", "0.5736768", "0.56432146", "0.5639515", "0.5625344", "0.56026065", "0.5514841", "0.5495319", "0.5493361", "0.54555446", "0.54506385", "0.544259", "0.543173", "0.5420061", "0.541649",...
0.8338817
0
Map MXNet's elemwise_sub operator attributes to onnx's Sub operator and return the created node.
def convert_elementwise_sub(node, **kwargs): return create_basic_op_node('Sub', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def covert_broadcast_sub(node, **kwargs):\n return create_basic_op_node('Sub', node, kwargs)", "def __sub__(self, other):\r\n if isinstance(other, Node):\r\n new_node = sub_op(self, other)\r\n else:\r\n # Add by a constant stores the constant in the new node's const_attr fi...
[ "0.7141842", "0.68545747", "0.615163", "0.6033808", "0.5878628", "0.58357406", "0.5734108", "0.5729102", "0.56466043", "0.56212085", "0.55548865", "0.5548119", "0.54640967", "0.5426134", "0.5379119", "0.5364434", "0.53603375", "0.53511447", "0.5314139", "0.53106934", "0.52824...
0.78317475
0
Map MXNet's broadcast_sub operator attributes to onnx's Sub operator and return the created node.
def covert_broadcast_sub(node, **kwargs): return create_basic_op_node('Sub', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_elementwise_sub(node, **kwargs):\n return create_basic_op_node('Sub', node, kwargs)", "def __sub__(self, other):\r\n if isinstance(other, Node):\r\n new_node = sub_op(self, other)\r\n else:\r\n # Add by a constant stores the constant in the new node's const_attr...
[ "0.67469776", "0.58889604", "0.5360961", "0.52912056", "0.52282476", "0.5209201", "0.51015747", "0.5097751", "0.5091051", "0.5062901", "0.5049066", "0.5000981", "0.4991342", "0.49907017", "0.49763283", "0.49502683", "0.4915788", "0.4915497", "0.49108106", "0.48997536", "0.486...
0.789098
0
Map MXNet's elemwise_mul operator attributes to onnx's Mul operator and return the created node.
def convert_elemwise_mul(node, **kwargs): return create_basic_op_node('Mul', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)", "def convert_mul_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Mul', **kwargs)", "def __mul__(self, other):\n\n return self._binary_elementwise_op(other, np.multiply)", "def __mul__(self,th...
[ "0.7594407", "0.71869403", "0.6733486", "0.66129386", "0.6552191", "0.6502875", "0.6452503", "0.6406036", "0.6391522", "0.6358358", "0.6305449", "0.62934154", "0.62835354", "0.62716043", "0.62310976", "0.6220493", "0.6211813", "0.62024266", "0.6190721", "0.6168245", "0.616184...
0.8666627
0
Map MXNet's broadcast_mul operator attributes to onnx's Mul operator and return the created node.
def convert_broadcast_mul(node, **kwargs): return create_basic_op_node('Mul', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)", "def convert_mul_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Mul', **kwargs)", "def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)", "def __mul__(s...
[ "0.7861241", "0.6676384", "0.6386201", "0.63332176", "0.62079525", "0.6138245", "0.6137143", "0.61355305", "0.6135341", "0.6122188", "0.6114543", "0.60985154", "0.60596746", "0.5996618", "0.5970572", "0.5940501", "0.5925935", "0.5879273", "0.5866603", "0.58360845", "0.5831286...
0.8551312
0
Map MXNet's elemwise_div operator attributes to onnx's Div operator and return the created node.
def convert_elemwise_div(node, **kwargs): return create_basic_op_node('Div', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_broadcast_div(node, **kwargs):\n return create_basic_op_node('Div', node, kwargs)", "def convert_div_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)", "def __div__(self,that):\n return self.__opExpand2(that, np.divide)", "def __div__(self, other, **kwargs):\n ...
[ "0.7340376", "0.6443447", "0.61537325", "0.592873", "0.5718753", "0.5646812", "0.5636024", "0.5619774", "0.5601933", "0.5586622", "0.5423298", "0.54130757", "0.5413029", "0.5365055", "0.52879655", "0.52837497", "0.52705294", "0.5257432", "0.5253376", "0.5204898", "0.5200385",...
0.8170008
0
Map MXNet's broadcast_div operator attributes to onnx's Div operator and return the created node.
def convert_broadcast_div(node, **kwargs): return create_basic_op_node('Div', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_elemwise_div(node, **kwargs):\n return create_basic_op_node('Div', node, kwargs)", "def convert_div_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)", "def __div__(self,that):\n return self.__opExpand2(that, np.divide)", "def divmod_node(self, mpi_procs, omp_threa...
[ "0.7091991", "0.5654483", "0.54506725", "0.5246034", "0.5235014", "0.5234837", "0.5166161", "0.5122407", "0.51005995", "0.5100523", "0.5087641", "0.50812745", "0.5039023", "0.50275385", "0.49929944", "0.49497214", "0.49223348", "0.49145442", "0.48808396", "0.4876805", "0.4875...
0.81881297
0
Map MXNet's negative operator attributes to onnx's Neg operator and return the created node.
def convert_negative(node, **kwargs): return create_basic_op_node('Neg', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __neg__(self):\n return UnaryMinus(self)", "def __neg__(self) -> ColumnOperators:\n return self.operate(neg)", "def __neg__(self):\n return type(self)(self.parent(), self._simplify(-self._express))", "def __neg__(self):\n data = [[-self[i, j] for j in range(self.n)] for i in r...
[ "0.71537274", "0.69829476", "0.69523317", "0.6824377", "0.67790425", "0.6737146", "0.67330045", "0.67264277", "0.6704793", "0.66476816", "0.65888995", "0.6529035", "0.6525028", "0.6494333", "0.64887315", "0.6466599", "0.6442492", "0.6382106", "0.6371845", "0.6371845", "0.6351...
0.7796946
0
Map MXNet's abs operator attributes to onnx's Abs operator and return the created node.
def convert_abs(node, **kwargs): return create_basic_op_node('Abs', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def abs(self) -> LinearOperator:\n return self.__class__(self._diag.abs())", "def __abs__(self):\n out = self.copy()\n out.addFunction(Query.Function.Abs)\n return out", "def abs(self):\n\n return self._get(\"abs\", rtype=self.__class__)", "def __init__(self):\n Gina...
[ "0.6505", "0.6213608", "0.6065252", "0.592394", "0.58516395", "0.5665779", "0.5517811", "0.5490736", "0.5476933", "0.54744714", "0.54476523", "0.54003453", "0.5390359", "0.5326909", "0.5319726", "0.5315074", "0.53031343", "0.52716726", "0.5261375", "0.5250854", "0.524636", ...
0.7698864
0
Map MXNet's add_n operator attributes to onnx's Sum operator and return the created node.
def convert_addn(node, **kwargs): return create_basic_op_node('Sum', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_elementwise_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)", "def covert_broadcast_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)", "def ADD (self, n1, n2):", "def convert_sum(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node,...
[ "0.66162986", "0.65958875", "0.65007085", "0.6439678", "0.6250043", "0.5785631", "0.5775108", "0.57710975", "0.57699406", "0.57460105", "0.5735643", "0.57265383", "0.5694182", "0.5615365", "0.5607123", "0.56009924", "0.5595946", "0.5591358", "0.5585656", "0.5575787", "0.55362...
0.79465455
0
Map MXNet's ceil operator attributes to onnx's Ceil operator and return the created node.
def convert_ceil(node, **kwargs): return create_basic_op_node('Ceil', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n BuiltinFunction.__init__(self, \"ceil\",\n conversions=dict(maxima='ceiling',\n sympy='ceiling'))", "def __ceil__(self, ???):", "def ceil(raw_tensor):\n dst_dtype = \"int32\"\n\n return cast_...
[ "0.57651013", "0.5506392", "0.5357824", "0.5333224", "0.5305275", "0.52933913", "0.52933913", "0.52933913", "0.52933913", "0.52933913", "0.52933913", "0.52933913", "0.52933913", "0.52933913", "0.52933913", "0.52933913", "0.52933913", "0.52933913", "0.52933913", "0.52933913", ...
0.76702064
0
Map MXNet's floor operator attributes to onnx's Floor operator and return the created node.
def convert_floor(node, **kwargs): return create_basic_op_node('Floor', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_floor(tile):\n rtn = bmesh.new()\n rtn.from_object(bpy.data.objects['FLOOR_CENTER'], bpy.context.scene)\n BmeshFactory.add_floor_corners(rtn, tile)\n rtn.from_object(bpy.data.objects[tile.terrain_type().name], bpy.context.scene)\n BmeshFactory.add_ceiling_center_below(r...
[ "0.5913754", "0.5866849", "0.5621953", "0.5610637", "0.55931014", "0.5583222", "0.54438764", "0.53761", "0.53067064", "0.5256313", "0.5121537", "0.50940347", "0.5093765", "0.5074465", "0.50180596", "0.50060546", "0.49971378", "0.49704736", "0.49595678", "0.49277905", "0.49110...
0.76645154
0
Map MXNet's Reshape operator attributes to onnx's Reshape operator. Converts output shape attribute to output shape tensor and return multiple created nodes.
def convert_reshape(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) output_shape_list = convert_string_to_list(attrs["shape"]) initializer = kwargs["initializer"] output_shape_np = np.array(output_shape_list, dtype='int64') data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype] dims = np.shape(output_shape_np) output_shape_name = "reshape_attr_tensor" + str(kwargs["idx"]) tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims) initializer.append( onnx.helper.make_tensor( name=output_shape_name, data_type=data_type, dims=dims, vals=output_shape_list, raw=False, ) ) input_nodes.append(output_shape_name) not_supported_shape = [-2, -3, -4] for val in output_shape_list: if val in not_supported_shape: raise AttributeError("Reshape: Shape value not supported in ONNX", val) reshape_node = onnx.helper.make_node( "Reshape", input_nodes, [name], name=name ) return [tensor_node, reshape_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_helper_reshape_node(input_name, output_name, shape, kwargs):\n shape_tensor_node, = create_helper_tensor_node(\n np.asarray(shape, dtype=np.int64), output_name + \"__shape\", kwargs\n )\n reshape_node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[input_name, shape_tenso...
[ "0.7556744", "0.7085336", "0.7027707", "0.67682624", "0.67622006", "0.67400914", "0.65949494", "0.65602094", "0.6389615", "0.6363775", "0.62704414", "0.61320436", "0.6128499", "0.6113789", "0.5977804", "0.5916986", "0.58968294", "0.5860646", "0.58554107", "0.5846493", "0.5837...
0.77601105
0
Map MXNet's Cast operator attributes to onnx's Cast operator and return the created node.
def convert_cast(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) dtype = attrs["dtype"] # dtype can be mapped only with types from TensorProto # float32 is mapped to float and float64 to double in onnx # following tensorproto mapping https://github.com/onnx/onnx/blob/master/onnx/mapping.py if dtype == 'float32': dtype = 'float' elif dtype == 'float64': dtype = 'double' node = onnx.helper.make_node( "Cast", input_nodes, [name], to=getattr(onnx.TensorProto, dtype.upper()), name=name, ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helpe...
[ "0.72991306", "0.70823056", "0.6469727", "0.6260471", "0.58524877", "0.56869894", "0.5614678", "0.5614463", "0.5590272", "0.5541235", "0.5510816", "0.5470201", "0.54070497", "0.5351177", "0.53237814", "0.5306725", "0.5301981", "0.5294145", "0.528131", "0.5262491", "0.5260314"...
0.67715365
2
Map MXNet's slice_axis operator attributes to onnx's Slice operator and return the created node.
def convert_slice_axis(node, **kwargs): name, input_nodes, input_shapes, attrs = get_inputs(node, kwargs, with_shapes=True) axes = int(attrs.get("axis")) starts = int(attrs.get("begin")) ends = attrs.get("end", None) if not ends or ends == 'None': # ONNX doesn't support None for ends. Since ends=None depicts # length of dimension, passing dimension in this case. in_shape = input_shapes[0] ends = in_shape[axes] export_nodes = [] starts = np.atleast_1d(np.asarray(starts, dtype=np.int)) ends = np.atleast_1d(np.asarray(ends, dtype=np.int)) axes = np.atleast_1d(np.asarray(axes, dtype=np.int)) starts_node = create_helper_tensor_node(starts, name + '__starts', kwargs) export_nodes.extend(starts_node) starts_node = starts_node[-1].name ends_node = create_helper_tensor_node(ends, name + '__ends', kwargs) export_nodes.extend(ends_node) ends_node = ends_node[-1].name axes_node = create_helper_tensor_node(axes, name + '__axes', kwargs) export_nodes.extend(axes_node) axes_node = axes_node[-1].name input_node = input_nodes[0] node = onnx.helper.make_node( "Slice", [input_node, starts_node, ends_node, axes_node], [name], name=name, ) export_nodes.extend([node]) return export_nodes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_slice(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n for attr in ['starts', 'ends', 'axes', 'steps']:\n node.input.append(op.name + \":\" + attr)\n return node", "def _fix_slice(self, inputs, new_attr):\n begin = new_attr.get('begin')...
[ "0.71705914", "0.68219244", "0.6615894", "0.6356295", "0.6166811", "0.6157213", "0.61249214", "0.5888455", "0.58519363", "0.58514374", "0.57256836", "0.57235664", "0.5691767", "0.5674897", "0.5673684", "0.5644206", "0.56324863", "0.5606119", "0.5604062", "0.56020904", "0.5581...
0.7049499
1
Map MXNet's SliceChannel operator attributes to onnx's Squeeze or Split operator based on squeeze_axis attribute and return the created node.
def convert_slice_channel(node, **kwargs): name, input_nodes, input_shapes, attrs = get_inputs(node, kwargs, with_shapes=True) num_outputs = int(attrs.get("num_outputs")) axis = int(attrs.get("axis", 1)) squeeze_axis = int(attrs.get("squeeze_axis", 0)) if squeeze_axis == 1 and num_outputs == 1: node = onnx.helper.make_node( "Squeeze", input_nodes, [name], axes=[axis], name=name, ) return [node] elif squeeze_axis == 0 and num_outputs > 1: in_shape = input_shapes[0] split = in_shape[axis] // num_outputs node = onnx.helper.make_node( "Split", input_nodes, [name+'_output'+str(i) for i in range(num_outputs)], axis=axis, split=[split for _ in range(num_outputs)], name=name, ) return [node] else: raise NotImplementedError("SliceChannel operator with num_outputs>1 and" "squeeze_axis true is not implemented.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node", "def _create_squeeze(cls, onnx_node, inputs, opset_version):\n axes = onnx_no...
[ "0.6746628", "0.6559421", "0.61696434", "0.61142856", "0.5958457", "0.5643979", "0.5640216", "0.54976195", "0.54508215", "0.54356575", "0.53339934", "0.53273505", "0.51586306", "0.5094175", "0.5021248", "0.50043905", "0.49964705", "0.49707195", "0.4929398", "0.4861826", "0.48...
0.669003
1
Map MXNet's expand_dims operator attributes to onnx's Unsqueeze operator and return the created node.
def convert_expand_dims(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) axis = int(attrs.get("axis")) node = onnx.helper.make_node( "Unsqueeze", input_nodes, [name], axes=[axis], name=name, ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node", "def _create_squeeze(cls, onnx_node, inputs, opset_version):\n axes = onnx_no...
[ "0.7386183", "0.6739398", "0.6726751", "0.637353", "0.62995607", "0.60692775", "0.60286963", "0.60238683", "0.59741277", "0.59160703", "0.5735533", "0.5717082", "0.5696729", "0.5589094", "0.55591923", "0.5529139", "0.55204725", "0.5500257", "0.54655224", "0.54617375", "0.5455...
0.716333
1
Map MXNet's squeeze operator attributes to onnx's squeeze operator and return the created node.
def convert_squeeze(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) axis = attrs.get("axis", None) if not axis: raise AttributeError("Squeeze: Missing axis attribute: ONNX currently requires axis to " "be specified for squeeze operator") axis = convert_string_to_list(axis) node = onnx.helper.make_node( "Squeeze", input_nodes, [name], axes=axis, name=name, ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node", "def _create_squeeze(cls, onnx_node, inputs, opset_version):\n axes = onnx_no...
[ "0.7598868", "0.717895", "0.6282262", "0.60528755", "0.5627854", "0.5592299", "0.5459605", "0.53992504", "0.5209412", "0.52017725", "0.5185037", "0.5168814", "0.5109538", "0.50819665", "0.50502604", "0.5031043", "0.500968", "0.49988806", "0.49876928", "0.49575466", "0.4944436...
0.6714586
2
Map MXNet's log operator attributes to onnx's Log operator and return the created node.
def convert_log(node, **kwargs): return create_basic_op_node('Log', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.log())", "def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return ConstantDiagLinearOperator(self.diag_values.log(), diag...
[ "0.59120095", "0.5716469", "0.5401583", "0.5346299", "0.532194", "0.5304277", "0.5276738", "0.5258929", "0.51880515", "0.51534116", "0.5130242", "0.5092342", "0.5069547", "0.5066726", "0.505875", "0.5041651", "0.50384235", "0.495094", "0.49467453", "0.49352145", "0.4929605", ...
0.7054104
0
Map MXNet's reciprocal operator attributes to onnx's Reciprocal operator and return the created node.
def convert_reciprocal(node, **kwargs): return create_basic_op_node('Reciprocal', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node", "def __call__(self):\n new_node = Op.__call__(self)\n return new_node", "def __call__(self):\r\n ...
[ "0.5679397", "0.56778276", "0.56740934", "0.56724566", "0.56111914", "0.5536893", "0.54996645", "0.5413009", "0.5394044", "0.53590983", "0.5352823", "0.53090364", "0.53014636", "0.5288799", "0.525179", "0.52360773", "0.5220722", "0.5163405", "0.5150799", "0.51470494", "0.5146...
0.689847
0
Map MXNet's _power operator attributes to onnx's Pow operator and return the created node.
def convert_power(node, **kwargs): return create_basic_op_node('Pow', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)", "def convert_pow(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = block.var(op.output(\"Out\")[0]).dtype\n dtype = _convert_dtype_value(dtype)\n factor = op.attr(\"factor\")\n factor...
[ "0.75909674", "0.7083156", "0.66979545", "0.64450526", "0.6441693", "0.64366263", "0.6400053", "0.6366476", "0.63472486", "0.62409025", "0.622891", "0.6202606", "0.60667837", "0.6045029", "0.6029663", "0.6026515", "0.60182786", "0.59912276", "0.5968232", "0.5967135", "0.59407...
0.79412013
0
Map MXNet's broadcast_power operator attributes to onnx's Pow operator and return the created node.
def convert_broadcast_power(node, **kwargs): return create_basic_op_node('Pow', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)", "def convert_pow(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = block.var(op.output(\"Out\")[0]).dtype\n dtype = _convert_dtype_value(dtype)\n factor = op.attr(\"factor\")\n factor = _expr.c...
[ "0.7941838", "0.7083369", "0.66985244", "0.64452976", "0.64437544", "0.6436942", "0.64016074", "0.63671076", "0.63479775", "0.6240992", "0.62295127", "0.6203682", "0.6067832", "0.6046127", "0.60306716", "0.60275626", "0.6018355", "0.5992741", "0.5969637", "0.5969036", "0.5942...
0.7592172
1
Map MXNet's sqrt operator attributes to onnx's Sqrt operator and return the created node.
def convert_sqrt(node, **kwargs): return create_basic_op_node('Sqrt', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Sqrt(%s)\" % (node_A.name)\r\n return new_node", "def sqrt(self):\n return type(self)(self.parent(),\n self._simplify(self._express.sqrt())...
[ "0.65579957", "0.6451186", "0.6441741", "0.6321632", "0.61004037", "0.5692846", "0.56454605", "0.561679", "0.55741465", "0.5483834", "0.5446249", "0.5332103", "0.527836", "0.52485776", "0.5248409", "0.5226648", "0.5189378", "0.5185982", "0.51702994", "0.5157216", "0.513852", ...
0.7393713
0
Map MXNet's depth_to_space operator attributes to onnx's DepthToSpace operator and return the created node.
def convert_depthtospace(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) blksize = int(attrs.get("block_size", 0)) node = onnx.helper.make_node( "DepthToSpace", input_nodes, [name], blocksize=blksize, name=name, ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_spacetodepth(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n blksize = int(attrs.get(\"block_size\", 0))\n\n node = onnx.helper.make_node(\n \"SpaceToDepth\",\n input_nodes,\n [name],\n blocksize=blksize,\n name=name,\n )\n re...
[ "0.654902", "0.5397657", "0.5092263", "0.5052654", "0.49498823", "0.49392277", "0.4938975", "0.48952127", "0.48248088", "0.48029906", "0.47768003", "0.4760767", "0.47406405", "0.46349868", "0.4629043", "0.45984888", "0.45853606", "0.45547014", "0.45455354", "0.4535628", "0.45...
0.71486926
0
Map MXNet's space_to_depth operator attributes to onnx's SpaceToDepth operator and return the created node.
def convert_spacetodepth(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) blksize = int(attrs.get("block_size", 0)) node = onnx.helper.make_node( "SpaceToDepth", input_nodes, [name], blocksize=blksize, name=name, ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_depthtospace(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n blksize = int(attrs.get(\"block_size\", 0))\n\n node = onnx.helper.make_node(\n \"DepthToSpace\",\n input_nodes,\n [name],\n blocksize=blksize,\n name=name,\n )\n re...
[ "0.6864339", "0.5582542", "0.5029747", "0.5017303", "0.49569693", "0.49251297", "0.4901449", "0.4830856", "0.48081103", "0.48042098", "0.480372", "0.4798086", "0.4687717", "0.46674845", "0.4647743", "0.4614791", "0.45717818", "0.455431", "0.45452812", "0.45188713", "0.4509976...
0.6642687
1
Map MXNet's square operator attributes to onnx's Pow operator and return the created node.
def convert_square(node, **kwargs): name, input_nodes, _ = get_inputs(node, kwargs) initializer = kwargs["initializer"] data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')] power2_name = "square_tensor" + str(kwargs["idx"]) tensor_node = onnx.helper.make_tensor_value_info(power2_name, data_type, (1,)) initializer.append( onnx.helper.make_tensor( name=power2_name, data_type=data_type, dims=(1,), vals=[2], raw=False, ) ) input_nodes.append(power2_name) node = onnx.helper.make_node( "Pow", input_nodes, [name], name=name ) return [tensor_node, node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)", "def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)", "def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)", "def convert_pow(...
[ "0.70565623", "0.688015", "0.65715593", "0.6302769", "0.6290786", "0.6263917", "0.624317", "0.5840452", "0.54773146", "0.5454221", "0.5453616", "0.5436294", "0.5433582", "0.5418844", "0.541297", "0.5402865", "0.5398615", "0.53971535", "0.5396661", "0.5394583", "0.53737813", ...
0.6750729
2
Map MXNet's sum operator attributes to onnx's ReduceSum operator and return the created node.
def convert_sum(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) mx_axis = attrs.get("axis", None) axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None keepdims = get_boolean_attribute_value(attrs, "keepdims") if axes: node = onnx.helper.make_node( 'ReduceSum', inputs=input_nodes, outputs=[name], axes=axes, keepdims=keepdims, name=name ) else: node = onnx.helper.make_node( 'ReduceSum', inputs=input_nodes, outputs=[name], keepdims=keepdims, name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_addn(node, **kwargs):\n return create_basic_op_node('Sum', node, kwargs)", "def with_sum_sum_reduction(self):\n return self.with_reduction(lambda x: x.sum())", "def sum(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.sum, red...
[ "0.6900051", "0.6317507", "0.6197959", "0.61500716", "0.60951954", "0.60030466", "0.5988547", "0.5975774", "0.5914605", "0.58477676", "0.5730659", "0.5723871", "0.56718594", "0.5667977", "0.5665367", "0.5665367", "0.5665367", "0.5665367", "0.5665367", "0.56367636", "0.5594987...
0.7541393
0
Map MXNet's shape_array operator attributes to onnx's Shape operator and return the created node.
def convert_shape(node, **kwargs): return create_basic_op_node('Shape', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ashape(node):\n shp = node.shape\n assert shp is not None\n return shp", "def create_helper_shape_node(input_name, output_name):\n shape_node = onnx.helper.make_node(\n \"Shape\",\n inputs=[input_name],\n outputs=[output_name],\n name=output_name,\n )\n return [s...
[ "0.6381112", "0.6290396", "0.61286956", "0.60487986", "0.6037103", "0.5871047", "0.57438403", "0.5713804", "0.57093483", "0.5691644", "0.5663834", "0.56613344", "0.5644378", "0.5637776", "0.56361204", "0.56357217", "0.562241", "0.56134427", "0.56105673", "0.55175805", "0.5502...
0.6979719
0
Map MXNet's hard_sigmoid operator attributes to onnx's HardSigmoid operator and return the created node.
def convert_hardsigmoid(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) # Converting to float32 alpha = float(attrs.get("alpha", 0.2)) beta = float(attrs.get("beta", 0.5)) node = onnx.helper.make_node( 'HardSigmoid', input_nodes, [name], alpha=alpha, beta=beta, name=name ) return [node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node", "def _create_hardsigmoid(cls, ...
[ "0.8193963", "0.7711967", "0.72541773", "0.6859289", "0.67957366", "0.6224166", "0.6205663", "0.62023205", "0.61203086", "0.608539", "0.6069968", "0.6067994", "0.6048303", "0.5977827", "0.59394383", "0.59345216", "0.5905961", "0.584112", "0.57807565", "0.5772983", "0.5710771"...
0.75288194
2
Map MXNet's broadcast_lesser operator attributes to onnx's Less operator and return the created node.
def convert_broadcast_lesser(node, **kwargs): return create_basic_op_node('Less', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_broadcast_greater(node, **kwargs):\n return create_basic_op_node('Greater', node, kwargs)", "def test_less_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fu...
[ "0.5858624", "0.5801406", "0.5520665", "0.5515037", "0.54783213", "0.5373597", "0.5251242", "0.5212264", "0.520232", "0.5198165", "0.5109519", "0.51010454", "0.5100173", "0.5031007", "0.5028119", "0.50162864", "0.49891058", "0.49844187", "0.4968322", "0.49516803", "0.49433753...
0.85602987
0
Map MXNet's broadcast_greater operator attributes to onnx's Greater operator and return the created node.
def convert_broadcast_greater(node, **kwargs): return create_basic_op_node('Greater', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_greater_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::gt\"},\n )", "def greater_than(self) -> global___Expression:", "def greate...
[ "0.636679", "0.6333078", "0.60892975", "0.60536104", "0.5972537", "0.59382397", "0.5819084", "0.57593983", "0.5737228", "0.56998545", "0.56720966", "0.5599701", "0.55706614", "0.55538386", "0.54705316", "0.53752804", "0.53341436", "0.5307528", "0.52884525", "0.5286231", "0.52...
0.8361962
0
Map MXNet's broadcast_equal operator attributes to onnx's Equal operator and return the created node.
def convert_broadcast_equal(node, **kwargs): return create_basic_op_node('Equal', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_equal_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"equal\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::eq\"},\n )", "def convert_broadcast_logical_xor(node, **kwargs):\n return create_basic...
[ "0.593598", "0.56330514", "0.5556243", "0.53851575", "0.5349817", "0.52744114", "0.5272917", "0.5226989", "0.5213488", "0.51595694", "0.51184994", "0.50618356", "0.5043084", "0.4965128", "0.4951298", "0.49477023", "0.49306282", "0.49269903", "0.49220464", "0.4916875", "0.4910...
0.79504997
0
Map MXNet's broadcast logical and operator attributes to onnx's And operator and return the created node.
def convert_broadcast_logical_and(node, **kwargs): return create_basic_op_node('And', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def covert_broadcast_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)", "def convert_elementwise_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)", "def _add_node(self, node_entry, idx):\n if node_entry[\"op\"].name not in relay_to_onnx_op_mapping:\n ...
[ "0.7668125", "0.64318824", "0.63783944", "0.5989999", "0.5933431", "0.5828969", "0.5782011", "0.57488996", "0.5697768", "0.56923693", "0.56780064", "0.56764853", "0.5642956", "0.5639059", "0.56186986", "0.56082076", "0.55935323", "0.5592177", "0.55808014", "0.5580383", "0.558...
0.585624
5
Map MXNet's broadcast logical or operator attributes to onnx's Or operator and return the created node.
def convert_broadcast_logical_or(node, **kwargs): return create_basic_op_node('Or', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_broadcast_logical_xor(node, **kwargs):\n return create_basic_op_node('Xor', node, kwargs)", "def convert_broadcast_logical_and(node, **kwargs):\n return create_basic_op_node('And', node, kwargs)", "def create_net(self, shape1, shape2, ir_version):\n\n #\n # Create ONNX model\n...
[ "0.6557331", "0.59182656", "0.58574575", "0.5785354", "0.5779746", "0.5683941", "0.563225", "0.5597137", "0.5566193", "0.5559112", "0.55208814", "0.5506325", "0.5495919", "0.5447485", "0.54401416", "0.54356146", "0.54324657", "0.54322755", "0.5418937", "0.54083866", "0.540492...
0.76808363
0
Map MXNet's broadcast logical xor operator attributes to onnx's Xor operator and return the created node.
def convert_broadcast_logical_xor(node, **kwargs): return create_basic_op_node('Xor', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_broadcast_logical_or(node, **kwargs):\n return create_basic_op_node('Or', node, kwargs)", "def xor(self, *args):\n return Xor(self, *args)", "def xor_network():\n # fmt: off\n tpm = np.array([\n [0, 0, 0],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n [...
[ "0.738217", "0.64635605", "0.6400872", "0.62084186", "0.5969626", "0.59505045", "0.59500635", "0.58331794", "0.58050483", "0.57880586", "0.56789726", "0.5678336", "0.5673798", "0.567273", "0.5661514", "0.5651046", "0.5638466", "0.5637232", "0.55427027", "0.5538561", "0.548832...
0.79945916
0
Map MXNet's logical not operator attributes to onnx's Not operator and return the created node.
def convert_logical_not(node, **kwargs): return create_basic_op_node('Not', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_logical_not(g, op, block):\n\n ipt0 = g.get_node(op.input(\"X\")[0])\n op_func = get_relay_op(op.type)\n out = op_func(ipt0)\n g.add_node(op.output(\"Out\")[0], out)", "def logical_not(data):\n return _make.logical_not(data)", "def bitwise_not(self) -> ColumnOperators:\n\n ret...
[ "0.75782496", "0.6934414", "0.6626711", "0.65900564", "0.64096093", "0.6354542", "0.6351578", "0.6329878", "0.62601715", "0.6213624", "0.6126839", "0.60965776", "0.60675186", "0.60501796", "0.60501796", "0.60501796", "0.60501796", "0.60501796", "0.60476726", "0.60350335", "0....
0.8060194
0
Map MXNet's size_array operator attributes to onnx's Size operator and return the created node.
def convert_size(node, **kwargs): return create_basic_op_node('Size', node, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_size(g, op, block):\n\n input_x = g.get_node(op.input(\"Input\")[0])\n out = _op.ndarray_size(input_x, dtype=\"int64\")\n out = _op.expand_dims(out, axis=0)\n g.add_node(op.output(\"Out\")[0], out)", "def arraySize( cls, value, typeCode = None ):\n return value.size", "def _t...
[ "0.6672948", "0.59488493", "0.5767115", "0.5739616", "0.5733506", "0.57241845", "0.5683179", "0.56279117", "0.5627782", "0.55931187", "0.5568864", "0.5536573", "0.55343175", "0.5522237", "0.55192995", "0.5505195", "0.55028814", "0.5481147", "0.54725236", "0.54569185", "0.5453...
0.6832319
0