efficientnet_b1-fp16-ov / efficientnet_b1.xml
<?xml version="1.0"?>
<net name="Model537" version="11">
<layers>
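<!-- OpenVINO IR (version 11) graph for EfficientNet-B1. Every Const layer references raw weight bytes in the companion .bin file by offset and size; f16 constants carry a "_compressed" suffix and feed Convert nodes that restore f32. -->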
<layer id="0" name="x" type="Parameter" version="opset1">
<data shape="1,3,240,240" element_type="f32" />
<output>
<port id="0" precision="FP32" names="image">
<dim>1</dim>
<dim>3</dim>
<dim>240</dim>
<dim>240</dim>
</port>
</output>
</layer>
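<!-- Model input: a single NCHW image, 1x3x240x240 (240 px is EfficientNet-B1's native resolution). -->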
<layer id="1" name="Multiply_278118_compressed" type="Const" version="opset1">
<data element_type="f16" shape="32, 3, 3, 3" offset="0" size="1728" />
<output>
<port id="0" precision="FP16">
<dim>32</dim>
<dim>3</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="2" name="Multiply_278118" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>32</dim>
<dim>3</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>32</dim>
<dim>3</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
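<!-- Layers 1 and 2 are the first instance of the Const + Convert pair repeated for every weight below. The rt_info "decompression" attribute marks the Convert as a precision-restoring cast; a runtime that executes in native f16 is free to drop it and keep the weights compressed. -->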
<layer id="3" name="Multiply_277633" type="Convolution" version="opset1">
<data strides="2, 2" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>240</dim>
<dim>240</dim>
</port>
<port id="1" precision="FP32">
<dim>32</dim>
<dim>3</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
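<!-- Stem: 3x3 convolution, stride 2, 3 -> 32 channels, halving the spatial size from 240x240 to 120x120. -->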
<layer id="4" name="Constant_277638_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 32, 1, 1" offset="1728" size="64" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="5" name="Constant_277638" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="6" name="__module.features.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="50_1">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
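<!-- Batch norm is folded into the preceding convolution's weights at conversion time; all that remains of aten::batch_norm is this broadcast Add applying the per-channel bias. -->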
<layer id="7" name="__module.features.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="50,input.3">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
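<!-- Swish (opset4) with no beta input computes x * sigmoid(x), i.e. PyTorch's SiLU. -->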
<layer id="8" name="Multiply_278122_compressed" type="Const" version="opset1">
<data element_type="f16" shape="32, 1, 1, 3, 3" offset="1792" size="576" />
<output>
<port id="0" precision="FP16">
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="9" name="Multiply_278122" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="10" name="Multiply_277640" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="1, 1" pads_end="1, 1" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="FP32">
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="66,input.7">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
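<!-- Depthwise 3x3 convolution expressed as GroupConvolution: the weight shape 32x1x1x3x3 is [groups, out/group, in/group, kH, kW], i.e. 32 groups of one channel each. -->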
<layer id="11" name="Constant_277645_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 32, 1, 1" offset="2368" size="64" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="12" name="Constant_277645" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="13" name="__module.features.1.0.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="71_1">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
<layer id="14" name="__module.features.1.0.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="71,input.9">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
<layer id="15" name="Constant_272658" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="16" name="__module.features.1.0.block.1.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="79,input.13">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
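<!-- aten::adaptive_avg_pool2d with a 1x1 target is lowered to ReduceMean with keep_dims. The 2-element i64 axes constant at offset 2432 (presumably the spatial axes {2, 3}) is shared by every squeeze-and-excitation block below. -->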
<layer id="17" name="self.features.1.0.block.1.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="8, 32, 1, 1" offset="2448" size="512" />
<output>
<port id="0" precision="FP16" names="self.features.1.0.block.1.fc1.weight">
<dim>8</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="18" name="self.features.1.0.block.1.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>8</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>8</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="19" name="__module.features.1.0.block.1.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>8</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="20" name="__module.features.1.0.block.1.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 8, 1, 1" offset="2960" size="16" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="21" name="__module.features.1.0.block.1.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="22" name="__module.features.1.0.block.1.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="86_1">
<dim>1</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="23" name="__module.features.1.0.block.1.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="86,input.15">
<dim>1</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="24" name="self.features.1.0.block.1.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="32, 8, 1, 1" offset="2976" size="512" />
<output>
<port id="0" precision="FP16" names="self.features.1.0.block.1.fc2.weight">
<dim>32</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="25" name="self.features.1.0.block.1.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>32</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>32</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="26" name="__module.features.1.0.block.1.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>32</dim>
<dim>8</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="27" name="__module.features.1.0.block.1.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 32, 1, 1" offset="3488" size="64" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="28" name="__module.features.1.0.block.1.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="29" name="__module.features.1.0.block.1.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="94,input.19">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="30" name="__module.features.1.0.block.1.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="95,scale.1">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="31" name="__module.features.1.0.block.1/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="96,input.21">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
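<!-- Layers 16 through 31 form a squeeze-and-excitation block: global average pool, fc1 1x1 conv (32 -> 8) + SiLU, fc2 1x1 conv (8 -> 32) + Sigmoid, then a broadcast Multiply that rescales the 32x120x120 feature map per channel. -->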
<layer id="32" name="Multiply_278125_compressed" type="Const" version="opset1">
<data element_type="f16" shape="16, 32, 1, 1" offset="3552" size="1024" />
<output>
<port id="0" precision="FP16">
<dim>16</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="33" name="Multiply_278125" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>16</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>16</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="34" name="Multiply_277647" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="FP32">
<dim>16</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
<layer id="35" name="Constant_277652_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 16, 1, 1" offset="4576" size="32" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="36" name="Constant_277652" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="37" name="__module.features.1.0.block.2.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="109,input.25">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
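<!-- Linear bottleneck projection: 1x1 conv 32 -> 16 followed only by the folded-BN bias Add, with no activation, matching the MBConv design. -->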
<layer id="38" name="Multiply_278129_compressed" type="Const" version="opset1">
<data element_type="f16" shape="16, 1, 1, 3, 3" offset="4608" size="288" />
<output>
<port id="0" precision="FP16">
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="39" name="Multiply_278129" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="40" name="Multiply_277654" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="1, 1" pads_end="1, 1" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="FP32">
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="123,input.27">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
<layer id="41" name="Constant_277659_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 16, 1, 1" offset="4896" size="32" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="42" name="Constant_277659" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="43" name="__module.features.1.1.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="128_1">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
<layer id="44" name="__module.features.1.1.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="128,input.29">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
<layer id="45" name="Constant_272666" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="46" name="__module.features.1.1.block.1.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="136,input.33">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="47" name="self.features.1.1.block.1.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="4, 16, 1, 1" offset="4928" size="128" />
<output>
<port id="0" precision="FP16" names="self.features.1.1.block.1.fc1.weight">
<dim>4</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="48" name="self.features.1.1.block.1.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>4</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="49" name="__module.features.1.1.block.1.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>4</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="50" name="__module.features.1.1.block.1.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 4, 1, 1" offset="5056" size="8" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="51" name="__module.features.1.1.block.1.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="52" name="__module.features.1.1.block.1.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="143_1">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="53" name="__module.features.1.1.block.1.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="143,input.35">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="54" name="self.features.1.1.block.1.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="16, 4, 1, 1" offset="5064" size="128" />
<output>
<port id="0" precision="FP16" names="self.features.1.1.block.1.fc2.weight">
<dim>16</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="55" name="self.features.1.1.block.1.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>16</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>16</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="56" name="__module.features.1.1.block.1.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>16</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="57" name="__module.features.1.1.block.1.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 16, 1, 1" offset="5192" size="32" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="58" name="__module.features.1.1.block.1.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="59" name="__module.features.1.1.block.1.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="151,input.39">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="60" name="__module.features.1.1.block.1.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="152,scale.3">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="61" name="__module.features.1.1.block.1/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="153,input.41">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
<layer id="62" name="Multiply_278132_compressed" type="Const" version="opset1">
<data element_type="f16" shape="16, 16, 1, 1" offset="5224" size="512" />
<output>
<port id="0" precision="FP16">
<dim>16</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="63" name="Multiply_278132" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>16</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>16</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="64" name="Multiply_277661" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="FP32">
<dim>16</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
<layer id="65" name="Constant_277666_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 16, 1, 1" offset="5736" size="32" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="66" name="Constant_277666" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="67" name="__module.features.1.1.block.2.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="166_1">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
<layer id="68" name="__module.features.1.1/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="166,result.1">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
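<!-- features.1.1 preserves shape (1x16x120x120), so its block output is added back to its input: the MBConv residual connection. -->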
<layer id="69" name="Multiply_278136_compressed" type="Const" version="opset1">
<data element_type="f16" shape="96, 16, 1, 1" offset="5768" size="3072" />
<output>
<port id="0" precision="FP16">
<dim>96</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="70" name="Multiply_278136" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>96</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>96</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="71" name="Multiply_277668" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="FP32">
<dim>96</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
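<!-- features.2.0 begins with the MBConv expansion: a 1x1 convolution widening 16 -> 96 channels (expand ratio 6). -->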
<layer id="72" name="Constant_277673_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 96, 1, 1" offset="8840" size="192" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="73" name="Constant_277673" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="74" name="__module.features.2.0.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="189_1">
<dim>1</dim>
<dim>96</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
<layer id="75" name="__module.features.2.0.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="189,input.49">
<dim>1</dim>
<dim>96</dim>
<dim>120</dim>
<dim>120</dim>
</port>
</output>
</layer>
<layer id="76" name="Multiply_278140_compressed" type="Const" version="opset1">
<data element_type="f16" shape="96, 1, 1, 3, 3" offset="9032" size="1728" />
<output>
<port id="0" precision="FP16">
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="77" name="Multiply_278140" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="78" name="Multiply_277675" type="GroupConvolution" version="opset1">
<data strides="2, 2" pads_begin="1, 1" pads_end="1, 1" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>120</dim>
<dim>120</dim>
</port>
<port id="1" precision="FP32">
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="199,input.53">
<dim>1</dim>
<dim>96</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
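<!-- The stride-2 depthwise convolution downsamples 120x120 -> 60x60; the rest of stage 2 runs at this resolution. -->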
<layer id="79" name="Constant_277680_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 96, 1, 1" offset="10760" size="192" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="80" name="Constant_277680" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="81" name="__module.features.2.0.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="204_1">
<dim>1</dim>
<dim>96</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="82" name="__module.features.2.0.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="204,input.55">
<dim>1</dim>
<dim>96</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="83" name="Constant_272676" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="84" name="__module.features.2.0.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="212,input.59">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="85" name="self.features.2.0.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="4, 96, 1, 1" offset="10952" size="768" />
<output>
<port id="0" precision="FP16" names="self.features.2.0.block.2.fc1.weight">
<dim>4</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="86" name="self.features.2.0.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>4</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="87" name="__module.features.2.0.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>4</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="88" name="__module.features.2.0.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 4, 1, 1" offset="11720" size="8" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="89" name="__module.features.2.0.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="90" name="__module.features.2.0.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="219_1">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="91" name="__module.features.2.0.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="219,input.61">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="92" name="self.features.2.0.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="96, 4, 1, 1" offset="11728" size="768" />
<output>
<port id="0" precision="FP16" names="self.features.2.0.block.2.fc2.weight">
<dim>96</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="93" name="self.features.2.0.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>96</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>96</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="94" name="__module.features.2.0.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>96</dim>
<dim>4</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="95" name="__module.features.2.0.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 96, 1, 1" offset="12496" size="192" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="96" name="__module.features.2.0.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="97" name="__module.features.2.0.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="227,input.65">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="98" name="__module.features.2.0.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="228,scale.5">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="99" name="__module.features.2.0.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="229,input.67">
<dim>1</dim>
<dim>96</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="100" name="Multiply_278143_compressed" type="Const" version="opset1">
<data element_type="f16" shape="24, 96, 1, 1" offset="12688" size="4608" />
<output>
<port id="0" precision="FP16">
<dim>24</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="101" name="Multiply_278143" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>24</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>24</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="102" name="Multiply_277682" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>96</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>24</dim>
<dim>96</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="103" name="Constant_277687_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 24, 1, 1" offset="17296" size="48" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="104" name="Constant_277687" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="105" name="__module.features.2.0.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="242,input.71">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="106" name="Multiply_278147_compressed" type="Const" version="opset1">
<data element_type="f16" shape="144, 24, 1, 1" offset="17344" size="6912" />
<output>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="107" name="Multiply_278147" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="108" name="Multiply_277689" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="109" name="Constant_277694_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 144, 1, 1" offset="24256" size="288" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="110" name="Constant_277694" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="111" name="__module.features.2.1.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="262_1">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="112" name="__module.features.2.1.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="262,input.75">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="113" name="Multiply_278151_compressed" type="Const" version="opset1">
<data element_type="f16" shape="144, 1, 1, 3, 3" offset="24544" size="2592" />
<output>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="114" name="Multiply_278151" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="115" name="Multiply_277696" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="1, 1" pads_end="1, 1" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="272,input.79">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="116" name="Constant_277701_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 144, 1, 1" offset="27136" size="288" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="117" name="Constant_277701" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="118" name="__module.features.2.1.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="277_1">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="119" name="__module.features.2.1.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="277,input.81">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="120" name="Constant_272684" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="121" name="__module.features.2.1.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="285,input.85">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="122" name="self.features.2.1.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="6, 144, 1, 1" offset="27424" size="1728" />
<output>
<port id="0" precision="FP16" names="self.features.2.1.block.2.fc1.weight">
<dim>6</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="123" name="self.features.2.1.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>6</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>6</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="124" name="__module.features.2.1.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>6</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="125" name="__module.features.2.1.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 6, 1, 1" offset="29152" size="12" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="126" name="__module.features.2.1.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="127" name="__module.features.2.1.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="292_1">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="128" name="__module.features.2.1.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="292,input.87">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="129" name="self.features.2.1.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="144, 6, 1, 1" offset="29164" size="1728" />
<output>
<port id="0" precision="FP16" names="self.features.2.1.block.2.fc2.weight">
<dim>144</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="130" name="self.features.2.1.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="131" name="__module.features.2.1.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="132" name="__module.features.2.1.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 144, 1, 1" offset="30892" size="288" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="133" name="__module.features.2.1.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="134" name="__module.features.2.1.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="300,input.91">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="135" name="__module.features.2.1.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="301,scale.7">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="136" name="__module.features.2.1.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="302,input.93">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="137" name="Multiply_278154_compressed" type="Const" version="opset1">
<data element_type="f16" shape="24, 144, 1, 1" offset="31180" size="6912" />
<output>
<port id="0" precision="FP16">
<dim>24</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="138" name="Multiply_278154" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>24</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>24</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="139" name="Multiply_277703" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>24</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="140" name="Constant_277708_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 24, 1, 1" offset="38092" size="48" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="141" name="Constant_277708" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="142" name="__module.features.2.1.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="315_1">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="143" name="__module.features.2.1/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="315,result.3">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
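		<!-- Annotation: features.2.2, the third MBConv of the 24-channel stage: 1x1 expand 24 to 144 with folded BN and SiLU, 3x3 depthwise conv, squeeze-and-excitation, 1x1 project back to 24, residual add. -->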
<layer id="144" name="Multiply_278158_compressed" type="Const" version="opset1">
<data element_type="f16" shape="144, 24, 1, 1" offset="38140" size="6912" />
<output>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="145" name="Multiply_278158" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="146" name="Multiply_277710" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="147" name="Constant_277715_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 144, 1, 1" offset="45052" size="288" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="148" name="Constant_277715" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="149" name="__module.features.2.2.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="336_1">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="150" name="__module.features.2.2.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="336,input.101">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="151" name="Multiply_278162_compressed" type="Const" version="opset1">
<data element_type="f16" shape="144, 1, 1, 3, 3" offset="45340" size="2592" />
<output>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="152" name="Multiply_278162" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="153" name="Multiply_277717" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="1, 1" pads_end="1, 1" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="346,input.105">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="154" name="Constant_277722_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 144, 1, 1" offset="47932" size="288" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="155" name="Constant_277722" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="156" name="__module.features.2.2.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="351_1">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="157" name="__module.features.2.2.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="351,input.107">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
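		<!-- Annotation: squeeze-and-excitation for features.2.2: global average pool (ReduceMean over H,W), 1x1 fc1 144 to 6, SiLU, 1x1 fc2 6 to 144, Sigmoid, then channel-wise rescale of the 60x60 feature map. -->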
<layer id="158" name="Constant_272694" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="159" name="__module.features.2.2.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="359,input.111">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="160" name="self.features.2.2.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="6, 144, 1, 1" offset="48220" size="1728" />
<output>
<port id="0" precision="FP16" names="self.features.2.2.block.2.fc1.weight">
<dim>6</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="161" name="self.features.2.2.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>6</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>6</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="162" name="__module.features.2.2.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>6</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="163" name="__module.features.2.2.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 6, 1, 1" offset="49948" size="12" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="164" name="__module.features.2.2.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="165" name="__module.features.2.2.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="366_1">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="166" name="__module.features.2.2.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="366,input.113">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="167" name="self.features.2.2.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="144, 6, 1, 1" offset="49960" size="1728" />
<output>
<port id="0" precision="FP16" names="self.features.2.2.block.2.fc2.weight">
<dim>144</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="168" name="self.features.2.2.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="169" name="__module.features.2.2.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="170" name="__module.features.2.2.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 144, 1, 1" offset="51688" size="288" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="171" name="__module.features.2.2.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="172" name="__module.features.2.2.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="374,input.117">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="173" name="__module.features.2.2.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="375,scale.9">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="174" name="__module.features.2.2.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="376,input.119">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="175" name="Multiply_278165_compressed" type="Const" version="opset1">
<data element_type="f16" shape="24, 144, 1, 1" offset="51976" size="6912" />
<output>
<port id="0" precision="FP16">
<dim>24</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="176" name="Multiply_278165" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>24</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>24</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="177" name="Multiply_277724" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>24</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="178" name="Constant_277729_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 24, 1, 1" offset="58888" size="48" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="179" name="Constant_277729" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="180" name="__module.features.2.2.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="389_1">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="181" name="__module.features.2.2/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="389,result.5">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
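		<!-- Annotation: features.3.0, first MBConv of the 40-channel stage: expand 24 to 144, 5x5 depthwise with stride 2 (60x60 down to 30x30), SE, project to 40. No residual add here since both stride and channel count change. -->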
<layer id="182" name="Multiply_278169_compressed" type="Const" version="opset1">
<data element_type="f16" shape="144, 24, 1, 1" offset="58936" size="6912" />
<output>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="183" name="Multiply_278169" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="184" name="Multiply_277731" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>24</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="185" name="Constant_277736_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 144, 1, 1" offset="65848" size="288" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="186" name="Constant_277736" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="187" name="__module.features.3.0.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="412_1">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="188" name="__module.features.3.0.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="412,input.127">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
</output>
</layer>
<layer id="189" name="Multiply_278173_compressed" type="Const" version="opset1">
<data element_type="f16" shape="144, 1, 1, 5, 5" offset="66136" size="7200" />
<output>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="190" name="Multiply_278173" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="191" name="Multiply_277738" type="GroupConvolution" version="opset1">
<data strides="2, 2" pads_begin="2, 2" pads_end="2, 2" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>60</dim>
<dim>60</dim>
</port>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="422,input.131">
<dim>1</dim>
<dim>144</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="192" name="Constant_277743_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 144, 1, 1" offset="73336" size="288" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="193" name="Constant_277743" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="194" name="__module.features.3.0.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="427_1">
<dim>1</dim>
<dim>144</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="195" name="__module.features.3.0.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="427,input.133">
<dim>1</dim>
<dim>144</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
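		<!-- Annotation: squeeze-and-excitation for features.3.0: the pooled 144-channel descriptor is squeezed to 6 channels and expanded back to 144 ahead of the sigmoid gate. -->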
<layer id="196" name="Constant_272704" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="197" name="__module.features.3.0.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="435,input.137">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="198" name="self.features.3.0.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="6, 144, 1, 1" offset="73624" size="1728" />
<output>
<port id="0" precision="FP16" names="self.features.3.0.block.2.fc1.weight">
<dim>6</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="199" name="self.features.3.0.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>6</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>6</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="200" name="__module.features.3.0.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>6</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="201" name="__module.features.3.0.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 6, 1, 1" offset="75352" size="12" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="202" name="__module.features.3.0.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="203" name="__module.features.3.0.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="442_1">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="204" name="__module.features.3.0.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="442,input.139">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="205" name="self.features.3.0.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="144, 6, 1, 1" offset="75364" size="1728" />
<output>
<port id="0" precision="FP16" names="self.features.3.0.block.2.fc2.weight">
<dim>144</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="206" name="self.features.3.0.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>144</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="207" name="__module.features.3.0.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>144</dim>
<dim>6</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="208" name="__module.features.3.0.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 144, 1, 1" offset="77092" size="288" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="209" name="__module.features.3.0.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="210" name="__module.features.3.0.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="450,input.143">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="211" name="__module.features.3.0.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="451,scale.11">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="212" name="__module.features.3.0.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="452,input.145">
<dim>1</dim>
<dim>144</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="213" name="Multiply_278176_compressed" type="Const" version="opset1">
<data element_type="f16" shape="40, 144, 1, 1" offset="77380" size="11520" />
<output>
<port id="0" precision="FP16">
<dim>40</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="214" name="Multiply_278176" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>40</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>40</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="215" name="Multiply_277745" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>144</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>40</dim>
<dim>144</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="216" name="Constant_277750_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 40, 1, 1" offset="88900" size="80" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="217" name="Constant_277750" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="218" name="__module.features.3.0.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="465,input.149">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
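		<!-- Annotation: features.3.1: expand 40 to 240, 5x5 depthwise stride 1, SE (240 to 10 to 240), project back to 40, then a residual add with the block input. -->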
<layer id="219" name="Multiply_278180_compressed" type="Const" version="opset1">
<data element_type="f16" shape="240, 40, 1, 1" offset="88980" size="19200" />
<output>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="220" name="Multiply_278180" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="221" name="Multiply_277752" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="222" name="Constant_277757_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 240, 1, 1" offset="108180" size="480" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="223" name="Constant_277757" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="224" name="__module.features.3.1.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="485_1">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="225" name="__module.features.3.1.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="485,input.153">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="226" name="Multiply_278184_compressed" type="Const" version="opset1">
<data element_type="f16" shape="240, 1, 1, 5, 5" offset="108660" size="12000" />
<output>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="227" name="Multiply_278184" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="228" name="Multiply_277759" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="2, 2" pads_end="2, 2" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="495,input.157">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="229" name="Constant_277764_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 240, 1, 1" offset="120660" size="480" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="230" name="Constant_277764" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="231" name="__module.features.3.1.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="500_1">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="232" name="__module.features.3.1.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="500,input.159">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
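		<!-- Annotation: squeeze-and-excitation for features.3.1: ReduceMean, fc1 240 to 10, SiLU, fc2 10 to 240, Sigmoid, channel-wise multiply over the 30x30 map. -->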
<layer id="233" name="Constant_272712" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="234" name="__module.features.3.1.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="508,input.163">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="235" name="self.features.3.1.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="10, 240, 1, 1" offset="121140" size="4800" />
<output>
<port id="0" precision="FP16" names="self.features.3.1.block.2.fc1.weight">
<dim>10</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="236" name="self.features.3.1.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>10</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>10</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="237" name="__module.features.3.1.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>10</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="238" name="__module.features.3.1.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 10, 1, 1" offset="125940" size="20" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="239" name="__module.features.3.1.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="240" name="__module.features.3.1.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="515_1">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="241" name="__module.features.3.1.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="515,input.165">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="242" name="self.features.3.1.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="240, 10, 1, 1" offset="125960" size="4800" />
<output>
<port id="0" precision="FP16" names="self.features.3.1.block.2.fc2.weight">
<dim>240</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="243" name="self.features.3.1.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="244" name="__module.features.3.1.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="245" name="__module.features.3.1.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 240, 1, 1" offset="130760" size="480" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="246" name="__module.features.3.1.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="247" name="__module.features.3.1.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="523,input.169">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="248" name="__module.features.3.1.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="524,scale.13">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="249" name="__module.features.3.1.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="525,input.171">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="250" name="Multiply_278187_compressed" type="Const" version="opset1">
<data element_type="f16" shape="40, 240, 1, 1" offset="131240" size="19200" />
<output>
<port id="0" precision="FP16">
<dim>40</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="251" name="Multiply_278187" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>40</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>40</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="252" name="Multiply_277766" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>40</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="253" name="Constant_277771_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 40, 1, 1" offset="150440" size="80" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="254" name="Constant_277771" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="255" name="__module.features.3.1.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="538_1">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="256" name="__module.features.3.1/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="538,result.7">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
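		<!-- Annotation: features.3.2: same layout as features.3.1 (expand 40 to 240, 5x5 depthwise, SE, project, residual) with its own weights. -->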
<layer id="257" name="Multiply_278191_compressed" type="Const" version="opset1">
<data element_type="f16" shape="240, 40, 1, 1" offset="150520" size="19200" />
<output>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="258" name="Multiply_278191" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="259" name="Multiply_277773" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="260" name="Constant_277778_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 240, 1, 1" offset="169720" size="480" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="261" name="Constant_277778" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="262" name="__module.features.3.2.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="559_1">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="263" name="__module.features.3.2.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="559,input.179">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="264" name="Multiply_278195_compressed" type="Const" version="opset1">
<data element_type="f16" shape="240, 1, 1, 5, 5" offset="170200" size="12000" />
<output>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="265" name="Multiply_278195" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="266" name="Multiply_277780" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="2, 2" pads_end="2, 2" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="569,input.183">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="267" name="Constant_277785_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 240, 1, 1" offset="182200" size="480" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="268" name="Constant_277785" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="269" name="__module.features.3.2.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="574_1">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="270" name="__module.features.3.2.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="574,input.185">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
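		<!-- Annotation: squeeze-and-excitation for features.3.2, identical in structure to the preceding SE module. -->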
<layer id="271" name="Constant_272722" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="272" name="__module.features.3.2.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="582,input.189">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="273" name="self.features.3.2.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="10, 240, 1, 1" offset="182680" size="4800" />
<output>
<port id="0" precision="FP16" names="self.features.3.2.block.2.fc1.weight">
<dim>10</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="274" name="self.features.3.2.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>10</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>10</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="275" name="__module.features.3.2.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>10</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="276" name="__module.features.3.2.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 10, 1, 1" offset="187480" size="20" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="277" name="__module.features.3.2.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="278" name="__module.features.3.2.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="589_1">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="279" name="__module.features.3.2.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="589,input.191">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="280" name="self.features.3.2.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="240, 10, 1, 1" offset="187500" size="4800" />
<output>
<port id="0" precision="FP16" names="self.features.3.2.block.2.fc2.weight">
<dim>240</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="281" name="self.features.3.2.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="282" name="__module.features.3.2.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="283" name="__module.features.3.2.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 240, 1, 1" offset="192300" size="480" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="284" name="__module.features.3.2.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="285" name="__module.features.3.2.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="597,input.195">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="286" name="__module.features.3.2.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="598,scale.15">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="287" name="__module.features.3.2.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="599,input.197">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
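<!-- Squeeze-and-Excitation for features.3.2 ends here: the pooled descriptor passes through fc1 (240 -> 10), SiLU, fc2 (10 -> 240) and Sigmoid, and the resulting per-channel scale multiplies the 1x240x30x30 feature map. -->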
<layer id="288" name="Multiply_278198_compressed" type="Const" version="opset1">
<data element_type="f16" shape="40, 240, 1, 1" offset="192780" size="19200" />
<output>
<port id="0" precision="FP16">
<dim>40</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="289" name="Multiply_278198" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>40</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>40</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="290" name="Multiply_277787" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>40</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="291" name="Constant_277792_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 40, 1, 1" offset="211980" size="80" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="292" name="Constant_277792" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
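<!-- The Add below is a BatchNormInference lowered at export time: the normalization scale appears to be folded into the preceding "Multiply_*" convolution weights, leaving only this 1x40x1x1 bias. -->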
<layer id="293" name="__module.features.3.2.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="612_1">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="294" name="__module.features.3.2/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="612,result.9">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
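<!-- Residual (skip) connection closing the features.3.2 MBConv block at 40 channels, 30x30. -->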
<layer id="295" name="Multiply_278202_compressed" type="Const" version="opset1">
<data element_type="f16" shape="240, 40, 1, 1" offset="212060" size="19200" />
<output>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="296" name="Multiply_278202" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="297" name="Multiply_277794" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>40</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>40</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="298" name="Constant_277799_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 240, 1, 1" offset="231260" size="480" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="299" name="Constant_277799" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="300" name="__module.features.4.0.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="636_1">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="301" name="__module.features.4.0.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="636,input.205">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
</output>
</layer>
<layer id="302" name="Multiply_278206_compressed" type="Const" version="opset1">
<data element_type="f16" shape="240, 1, 1, 3, 3" offset="231740" size="4320" />
<output>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="303" name="Multiply_278206" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="304" name="Multiply_277801" type="GroupConvolution" version="opset1">
<data strides="2, 2" pads_begin="1, 1" pads_end="1, 1" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>30</dim>
<dim>30</dim>
</port>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="646,input.209">
<dim>1</dim>
<dim>240</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
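<!-- Stride-2 depthwise 3x3 convolution (GroupConvolution, weight shape 240x1x1x3x3, i.e. 240 single-channel groups) downsamples features.4.0 from 30x30 to 15x15. -->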
<layer id="305" name="Constant_277806_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 240, 1, 1" offset="236060" size="480" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="306" name="Constant_277806" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="307" name="__module.features.4.0.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="651_1">
<dim>1</dim>
<dim>240</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="308" name="__module.features.4.0.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="651,input.211">
<dim>1</dim>
<dim>240</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="309" name="Constant_272732" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="310" name="__module.features.4.0.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="659,input.215">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
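<!-- aten::adaptive_avg_pool2d with a 1x1 target is expressed as ReduceMean with keep_dims; the two reduced axes (necessarily the spatial dims, given the 1x240x1x1 result) come from the shared i64 Const at byte offset 2432, reused by the other SE pooling nodes below. -->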
<layer id="311" name="self.features.4.0.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="10, 240, 1, 1" offset="236540" size="4800" />
<output>
<port id="0" precision="FP16" names="self.features.4.0.block.2.fc1.weight">
<dim>10</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="312" name="self.features.4.0.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>10</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>10</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="313" name="__module.features.4.0.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>10</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="314" name="__module.features.4.0.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 10, 1, 1" offset="241340" size="20" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="315" name="__module.features.4.0.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="316" name="__module.features.4.0.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="666_1">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="317" name="__module.features.4.0.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="666,input.217">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="318" name="self.features.4.0.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="240, 10, 1, 1" offset="241360" size="4800" />
<output>
<port id="0" precision="FP16" names="self.features.4.0.block.2.fc2.weight">
<dim>240</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="319" name="self.features.4.0.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>240</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="320" name="__module.features.4.0.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>240</dim>
<dim>10</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="321" name="__module.features.4.0.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 240, 1, 1" offset="246160" size="480" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="322" name="__module.features.4.0.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="323" name="__module.features.4.0.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="674,input.221">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="324" name="__module.features.4.0.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="675,scale.17">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="325" name="__module.features.4.0.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="676,input.223">
<dim>1</dim>
<dim>240</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
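<!-- Channel re-weighting for features.4.0: the 1x240x1x1 Sigmoid gate broadcasts against the 15x15 activation map. -->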
<layer id="326" name="Multiply_278209_compressed" type="Const" version="opset1">
<data element_type="f16" shape="80, 240, 1, 1" offset="246640" size="38400" />
<output>
<port id="0" precision="FP16">
<dim>80</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="327" name="Multiply_278209" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>80</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>80</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="328" name="Multiply_277808" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>240</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>80</dim>
<dim>240</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="329" name="Constant_277813_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 80, 1, 1" offset="285040" size="160" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="330" name="Constant_277813" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="331" name="__module.features.4.0.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="689,input.227">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
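<!-- Projection for features.4.0: 1x1 convolution 240 -> 80 plus folded-BN bias. No residual add follows, presumably because this block changed both stride and channel count. -->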
<layer id="332" name="Multiply_278213_compressed" type="Const" version="opset1">
<data element_type="f16" shape="480, 80, 1, 1" offset="285200" size="76800" />
<output>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="333" name="Multiply_278213" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="334" name="Multiply_277815" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="335" name="Constant_277820_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 480, 1, 1" offset="362000" size="960" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="336" name="Constant_277820" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="337" name="__module.features.4.1.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="709_1">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="338" name="__module.features.4.1.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="709,input.231">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
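<!-- Expansion phase of features.4.1: 1x1 convolution 80 -> 480, folded-BN bias, then SiLU. -->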
<layer id="339" name="Multiply_278217_compressed" type="Const" version="opset1">
<data element_type="f16" shape="480, 1, 1, 3, 3" offset="362960" size="8640" />
<output>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="340" name="Multiply_278217" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="341" name="Multiply_277822" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="1, 1" pads_end="1, 1" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="719,input.235">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="342" name="Constant_277827_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 480, 1, 1" offset="371600" size="960" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="343" name="Constant_277827" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="344" name="__module.features.4.1.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="724_1">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="345" name="__module.features.4.1.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="724,input.237">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="346" name="Constant_272740" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="347" name="__module.features.4.1.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="732,input.241">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="348" name="self.features.4.1.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="20, 480, 1, 1" offset="372560" size="19200" />
<output>
<port id="0" precision="FP16" names="self.features.4.1.block.2.fc1.weight">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="349" name="self.features.4.1.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="350" name="__module.features.4.1.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="351" name="__module.features.4.1.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 20, 1, 1" offset="391760" size="40" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="352" name="__module.features.4.1.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="353" name="__module.features.4.1.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="739_1">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="354" name="__module.features.4.1.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="739,input.243">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="355" name="self.features.4.1.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="480, 20, 1, 1" offset="391800" size="19200" />
<output>
<port id="0" precision="FP16" names="self.features.4.1.block.2.fc2.weight">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="356" name="self.features.4.1.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="357" name="__module.features.4.1.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="358" name="__module.features.4.1.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 480, 1, 1" offset="411000" size="960" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="359" name="__module.features.4.1.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="360" name="__module.features.4.1.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="747,input.247">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="361" name="__module.features.4.1.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="748,scale.19">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="362" name="__module.features.4.1.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="749,input.249">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="363" name="Multiply_278220_compressed" type="Const" version="opset1">
<data element_type="f16" shape="80, 480, 1, 1" offset="411960" size="76800" />
<output>
<port id="0" precision="FP16">
<dim>80</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="364" name="Multiply_278220" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>80</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>80</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="365" name="Multiply_277829" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>80</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="366" name="Constant_277834_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 80, 1, 1" offset="488760" size="160" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="367" name="Constant_277834" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="368" name="__module.features.4.1.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="762_1">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="369" name="__module.features.4.1/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="762,result.11">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
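<!-- features.4.1 repeats the MBConv pattern (expand 80 -> 480, depthwise 3x3, SE with fc1 480 -> 20, project back to 80) and closes with this residual add. -->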
<layer id="370" name="Multiply_278224_compressed" type="Const" version="opset1">
<data element_type="f16" shape="480, 80, 1, 1" offset="488920" size="76800" />
<output>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="371" name="Multiply_278224" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="372" name="Multiply_277836" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="373" name="Constant_277841_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 480, 1, 1" offset="565720" size="960" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="374" name="Constant_277841" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="375" name="__module.features.4.2.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="783_1">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="376" name="__module.features.4.2.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="783,input.257">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="377" name="Multiply_278228_compressed" type="Const" version="opset1">
<data element_type="f16" shape="480, 1, 1, 3, 3" offset="566680" size="8640" />
<output>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="378" name="Multiply_278228" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="379" name="Multiply_277843" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="1, 1" pads_end="1, 1" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="793,input.261">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="380" name="Constant_277848_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 480, 1, 1" offset="575320" size="960" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="381" name="Constant_277848" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="382" name="__module.features.4.2.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="798_1">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="383" name="__module.features.4.2.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="798,input.263">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="384" name="Constant_272750" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="385" name="__module.features.4.2.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="806,input.267">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="386" name="self.features.4.2.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="20, 480, 1, 1" offset="576280" size="19200" />
<output>
<port id="0" precision="FP16" names="self.features.4.2.block.2.fc1.weight">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="387" name="self.features.4.2.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="388" name="__module.features.4.2.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="389" name="__module.features.4.2.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 20, 1, 1" offset="595480" size="40" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="390" name="__module.features.4.2.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="391" name="__module.features.4.2.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="813_1">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="392" name="__module.features.4.2.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="813,input.269">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="393" name="self.features.4.2.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="480, 20, 1, 1" offset="595520" size="19200" />
<output>
<port id="0" precision="FP16" names="self.features.4.2.block.2.fc2.weight">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="394" name="self.features.4.2.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="395" name="__module.features.4.2.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="396" name="__module.features.4.2.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 480, 1, 1" offset="614720" size="960" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="397" name="__module.features.4.2.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="398" name="__module.features.4.2.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="821,input.273">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="399" name="__module.features.4.2.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="822,scale.21">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="400" name="__module.features.4.2.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="823,input.275">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="401" name="Multiply_278231_compressed" type="Const" version="opset1">
<data element_type="f16" shape="80, 480, 1, 1" offset="615680" size="76800" />
<output>
<port id="0" precision="FP16">
<dim>80</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="402" name="Multiply_278231" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>80</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>80</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="403" name="Multiply_277850" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>80</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="404" name="Constant_277855_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 80, 1, 1" offset="692480" size="160" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="405" name="Constant_277855" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="406" name="__module.features.4.2.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="836_1">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="407" name="__module.features.4.2/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="836,result.13">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
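<!-- features.4.2 is structurally identical to features.4.1; its 80-channel output feeds the features.4.3 expansion that begins below. -->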
<layer id="408" name="Multiply_278235_compressed" type="Const" version="opset1">
<data element_type="f16" shape="480, 80, 1, 1" offset="692640" size="76800" />
<output>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="409" name="Multiply_278235" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="410" name="Multiply_277857" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="411" name="Constant_277862_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 480, 1, 1" offset="769440" size="960" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="412" name="Constant_277862" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="413" name="__module.features.4.3.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="857_1">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="414" name="__module.features.4.3.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="857,input.283">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="415" name="Multiply_278239_compressed" type="Const" version="opset1">
<data element_type="f16" shape="480, 1, 1, 3, 3" offset="770400" size="8640" />
<output>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="416" name="Multiply_278239" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="417" name="Multiply_277864" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="1, 1" pads_end="1, 1" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="867,input.287">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="418" name="Constant_277869_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 480, 1, 1" offset="779040" size="960" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="419" name="Constant_277869" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="420" name="__module.features.4.3.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="872_1">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="421" name="__module.features.4.3.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="872,input.289">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="422" name="Constant_272760" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="423" name="__module.features.4.3.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="880,input.293">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="424" name="self.features.4.3.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="20, 480, 1, 1" offset="780000" size="19200" />
<output>
<port id="0" precision="FP16" names="self.features.4.3.block.2.fc1.weight">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="425" name="self.features.4.3.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="426" name="__module.features.4.3.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="427" name="__module.features.4.3.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 20, 1, 1" offset="799200" size="40" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="428" name="__module.features.4.3.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="429" name="__module.features.4.3.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="887_1">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="430" name="__module.features.4.3.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="887,input.295">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="431" name="self.features.4.3.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="480, 20, 1, 1" offset="799240" size="19200" />
<output>
<port id="0" precision="FP16" names="self.features.4.3.block.2.fc2.weight">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="432" name="self.features.4.3.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="433" name="__module.features.4.3.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="434" name="__module.features.4.3.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 480, 1, 1" offset="818440" size="960" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="435" name="__module.features.4.3.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="436" name="__module.features.4.3.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="895,input.299">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="437" name="__module.features.4.3.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="896,scale.23">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="438" name="__module.features.4.3.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="897,input.301">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
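<!-- features.4.3, projection stage: 1x1 convolution 480->80 plus the folded batch-norm offset,
     closed by a residual Add against the block input (matching 1x80x15x15 shape). -->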
<layer id="439" name="Multiply_278242_compressed" type="Const" version="opset1">
<data element_type="f16" shape="80, 480, 1, 1" offset="819400" size="76800" />
<output>
<port id="0" precision="FP16">
<dim>80</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="440" name="Multiply_278242" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>80</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>80</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="441" name="Multiply_277871" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>80</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="442" name="Constant_277876_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 80, 1, 1" offset="896200" size="160" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="443" name="Constant_277876" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="444" name="__module.features.4.3.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="910_1">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="445" name="__module.features.4.3/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="910,result.15">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
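<!-- features.5.0: first MBConv block of stage 5. Expansion 1x1 conv 80->480 + SiLU, 5x5 depthwise
     GroupConvolution (pad 2, stride 1), squeeze-and-excitation (480->20->480), projection 1x1 conv
     480->112. No residual Add here, since the channel count changes from 80 to 112. -->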
<layer id="446" name="Multiply_278246_compressed" type="Const" version="opset1">
<data element_type="f16" shape="480, 80, 1, 1" offset="896360" size="76800" />
<output>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="447" name="Multiply_278246" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="448" name="Multiply_277878" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="449" name="Constant_277883_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 480, 1, 1" offset="973160" size="960" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="450" name="Constant_277883" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="451" name="__module.features.5.0.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="934_1">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="452" name="__module.features.5.0.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="934,input.309">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="453" name="Multiply_278250_compressed" type="Const" version="opset1">
<data element_type="f16" shape="480, 1, 1, 5, 5" offset="974120" size="24000" />
<output>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="454" name="Multiply_278250" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="455" name="Multiply_277885" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="2, 2" pads_end="2, 2" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="944,input.313">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="456" name="Constant_277890_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 480, 1, 1" offset="998120" size="960" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="457" name="Constant_277890" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="458" name="__module.features.5.0.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="949_1">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="459" name="__module.features.5.0.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="949,input.315">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="460" name="Constant_272770" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="461" name="__module.features.5.0.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="957,input.319">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="462" name="self.features.5.0.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="20, 480, 1, 1" offset="999080" size="19200" />
<output>
<port id="0" precision="FP16" names="self.features.5.0.block.2.fc1.weight">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="463" name="self.features.5.0.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="464" name="__module.features.5.0.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>20</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="465" name="__module.features.5.0.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 20, 1, 1" offset="1018280" size="40" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="466" name="__module.features.5.0.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="467" name="__module.features.5.0.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="964_1">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="468" name="__module.features.5.0.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="964,input.321">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="469" name="self.features.5.0.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="480, 20, 1, 1" offset="1018320" size="19200" />
<output>
<port id="0" precision="FP16" names="self.features.5.0.block.2.fc2.weight">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="470" name="self.features.5.0.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="471" name="__module.features.5.0.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>480</dim>
<dim>20</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="472" name="__module.features.5.0.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 480, 1, 1" offset="1037520" size="960" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="473" name="__module.features.5.0.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="474" name="__module.features.5.0.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="972,input.325">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="475" name="__module.features.5.0.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="973,scale.25">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="476" name="__module.features.5.0.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="974,input.327">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="477" name="Multiply_278253_compressed" type="Const" version="opset1">
<data element_type="f16" shape="112, 480, 1, 1" offset="1038480" size="107520" />
<output>
<port id="0" precision="FP16">
<dim>112</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="478" name="Multiply_278253" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>112</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>112</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="479" name="Multiply_277892" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>480</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>112</dim>
<dim>480</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="480" name="Constant_277897_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 112, 1, 1" offset="1146000" size="224" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="481" name="Constant_277897" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="482" name="__module.features.5.0.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="987,input.331">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
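<!-- features.5.1: expansion 1x1 conv 112->672 + SiLU, 5x5 depthwise GroupConvolution,
     squeeze-and-excitation (672->28->672), projection 672->112, and a residual Add with the
     block input (shapes match at 1x112x15x15). -->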
<layer id="483" name="Multiply_278257_compressed" type="Const" version="opset1">
<data element_type="f16" shape="672, 112, 1, 1" offset="1146224" size="150528" />
<output>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="484" name="Multiply_278257" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="485" name="Multiply_277899" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="486" name="Constant_277904_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 672, 1, 1" offset="1296752" size="1344" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="487" name="Constant_277904" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="488" name="__module.features.5.1.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1007_1">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="489" name="__module.features.5.1.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1007,input.335">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="490" name="Multiply_278261_compressed" type="Const" version="opset1">
<data element_type="f16" shape="672, 1, 1, 5, 5" offset="1298096" size="33600" />
<output>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="491" name="Multiply_278261" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="492" name="Multiply_277906" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="2, 2" pads_end="2, 2" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1017,input.339">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="493" name="Constant_277911_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 672, 1, 1" offset="1331696" size="1344" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="494" name="Constant_277911" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="495" name="__module.features.5.1.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1022_1">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="496" name="__module.features.5.1.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1022,input.341">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="497" name="Constant_272778" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="498" name="__module.features.5.1.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1030,input.345">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="499" name="self.features.5.1.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="28, 672, 1, 1" offset="1333040" size="37632" />
<output>
<port id="0" precision="FP16" names="self.features.5.1.block.2.fc1.weight">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="500" name="self.features.5.1.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="501" name="__module.features.5.1.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="502" name="__module.features.5.1.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 28, 1, 1" offset="1370672" size="56" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="503" name="__module.features.5.1.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="504" name="__module.features.5.1.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1037_1">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="505" name="__module.features.5.1.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1037,input.347">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="506" name="self.features.5.1.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="672, 28, 1, 1" offset="1370728" size="37632" />
<output>
<port id="0" precision="FP16" names="self.features.5.1.block.2.fc2.weight">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="507" name="self.features.5.1.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="508" name="__module.features.5.1.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="509" name="__module.features.5.1.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 672, 1, 1" offset="1408360" size="1344" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="510" name="__module.features.5.1.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="511" name="__module.features.5.1.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1045,input.351">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="512" name="__module.features.5.1.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1046,scale.27">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="513" name="__module.features.5.1.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1047,input.353">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="514" name="Multiply_278264_compressed" type="Const" version="opset1">
<data element_type="f16" shape="112, 672, 1, 1" offset="1409704" size="150528" />
<output>
<port id="0" precision="FP16">
<dim>112</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="515" name="Multiply_278264" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>112</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>112</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="516" name="Multiply_277913" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>112</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="517" name="Constant_277918_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 112, 1, 1" offset="1560232" size="224" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="518" name="Constant_277918" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="519" name="__module.features.5.1.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1060_1">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="520" name="__module.features.5.1/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1060,result.17">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
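<!-- features.5.2: same topology as features.5.1 (expand 112->672, 5x5 depthwise, SE 672->28->672,
     project 672->112, residual Add); only the weight offsets differ. -->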
<layer id="521" name="Multiply_278268_compressed" type="Const" version="opset1">
<data element_type="f16" shape="672, 112, 1, 1" offset="1560456" size="150528" />
<output>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="522" name="Multiply_278268" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="523" name="Multiply_277920" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="524" name="Constant_277925_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 672, 1, 1" offset="1710984" size="1344" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="525" name="Constant_277925" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="526" name="__module.features.5.2.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1081_1">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="527" name="__module.features.5.2.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1081,input.361">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="528" name="Multiply_278272_compressed" type="Const" version="opset1">
<data element_type="f16" shape="672, 1, 1, 5, 5" offset="1712328" size="33600" />
<output>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="529" name="Multiply_278272" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="530" name="Multiply_277927" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="2, 2" pads_end="2, 2" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1091,input.365">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="531" name="Constant_277932_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 672, 1, 1" offset="1745928" size="1344" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="532" name="Constant_277932" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="533" name="__module.features.5.2.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1096_1">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="534" name="__module.features.5.2.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1096,input.367">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="535" name="Constant_272788" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="536" name="__module.features.5.2.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1104,input.371">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="537" name="self.features.5.2.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="28, 672, 1, 1" offset="1747272" size="37632" />
<output>
<port id="0" precision="FP16" names="self.features.5.2.block.2.fc1.weight">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="538" name="self.features.5.2.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="539" name="__module.features.5.2.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="540" name="__module.features.5.2.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 28, 1, 1" offset="1784904" size="56" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="541" name="__module.features.5.2.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="542" name="__module.features.5.2.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1111_1">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="543" name="__module.features.5.2.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1111,input.373">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="544" name="self.features.5.2.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="672, 28, 1, 1" offset="1784960" size="37632" />
<output>
<port id="0" precision="FP16" names="self.features.5.2.block.2.fc2.weight">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="545" name="self.features.5.2.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="546" name="__module.features.5.2.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="547" name="__module.features.5.2.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 672, 1, 1" offset="1822592" size="1344" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="548" name="__module.features.5.2.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="549" name="__module.features.5.2.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1119,input.377">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="550" name="__module.features.5.2.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1120,scale.29">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="551" name="__module.features.5.2.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1121,input.379">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="552" name="Multiply_278275_compressed" type="Const" version="opset1">
<data element_type="f16" shape="112, 672, 1, 1" offset="1823936" size="150528" />
<output>
<port id="0" precision="FP16">
<dim>112</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="553" name="Multiply_278275" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>112</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>112</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="554" name="Multiply_277934" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>112</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="555" name="Constant_277939_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 112, 1, 1" offset="1974464" size="224" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="556" name="Constant_277939" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="557" name="__module.features.5.2.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1134_1">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="558" name="__module.features.5.2/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1134,result.19">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
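<!-- features.5.3: repeats the features.5.1 topology (expand 112->672, 5x5 depthwise, SE, project,
     residual Add) with its own weights. -->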
<layer id="559" name="Multiply_278279_compressed" type="Const" version="opset1">
<data element_type="f16" shape="672, 112, 1, 1" offset="1974688" size="150528" />
<output>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="560" name="Multiply_278279" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="561" name="Multiply_277941" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="562" name="Constant_277946_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 672, 1, 1" offset="2125216" size="1344" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="563" name="Constant_277946" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="564" name="__module.features.5.3.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1155_1">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="565" name="__module.features.5.3.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1155,input.387">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="566" name="Multiply_278283_compressed" type="Const" version="opset1">
<data element_type="f16" shape="672, 1, 1, 5, 5" offset="2126560" size="33600" />
<output>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="567" name="Multiply_278283" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="568" name="Multiply_277948" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="2, 2" pads_end="2, 2" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1165,input.391">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="569" name="Constant_277953_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 672, 1, 1" offset="2160160" size="1344" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="570" name="Constant_277953" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="571" name="__module.features.5.3.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1170_1">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="572" name="__module.features.5.3.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1170,input.393">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="573" name="Constant_272798" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="574" name="__module.features.5.3.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1178,input.397">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
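<!-- Annotation: layers 573-589 implement the squeeze-and-excitation gate: aten::adaptive_avg_pool2d is lowered to ReduceMean with keep_dims="true" over the axes held in the shared i64 blob at offset 2432 (presumably the spatial axes [2, 3]; the same blob is reused by every SE block in this file). The pooled 1x1 features then pass through fc1 (672->28), SiLU, fc2 (28->672), and Sigmoid before rescaling the 672x15x15 map via a broadcast Multiply (layer 589). -->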
<layer id="575" name="self.features.5.3.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="28, 672, 1, 1" offset="2161504" size="37632" />
<output>
<port id="0" precision="FP16" names="self.features.5.3.block.2.fc1.weight">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="576" name="self.features.5.3.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="577" name="__module.features.5.3.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="578" name="__module.features.5.3.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 28, 1, 1" offset="2199136" size="56" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="579" name="__module.features.5.3.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="580" name="__module.features.5.3.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1185_1">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="581" name="__module.features.5.3.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1185,input.399">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="582" name="self.features.5.3.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="672, 28, 1, 1" offset="2199192" size="37632" />
<output>
<port id="0" precision="FP16" names="self.features.5.3.block.2.fc2.weight">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="583" name="self.features.5.3.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="584" name="__module.features.5.3.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="585" name="__module.features.5.3.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 672, 1, 1" offset="2236824" size="1344" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="586" name="__module.features.5.3.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="587" name="__module.features.5.3.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1193,input.403">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="588" name="__module.features.5.3.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1194,scale.31">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="589" name="__module.features.5.3.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1195,input.405">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="590" name="Multiply_278286_compressed" type="Const" version="opset1">
<data element_type="f16" shape="112, 672, 1, 1" offset="2238168" size="150528" />
<output>
<port id="0" precision="FP16">
<dim>112</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="591" name="Multiply_278286" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>112</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>112</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="592" name="Multiply_277955" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>112</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="593" name="Constant_277960_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 112, 1, 1" offset="2388696" size="224" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="594" name="Constant_277960" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="595" name="__module.features.5.3.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1208_1">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="596" name="__module.features.5.3/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1208,result.21">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
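<!-- Annotation: layers 597-633 form __module.features.6.0, the stage-transition block: expansion 112->672 (layer 599), 5x5 depthwise convolution with stride 2 shrinking 15x15 to 8x8 (layer 606), squeeze-and-excitation (layers 611-627), and a 1x1 projection to 192 (layer 630). There is no residual Add here, since both the stride and the channel count change across the block. -->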
<layer id="597" name="Multiply_278290_compressed" type="Const" version="opset1">
<data element_type="f16" shape="672, 112, 1, 1" offset="2388920" size="150528" />
<output>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="598" name="Multiply_278290" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="599" name="Multiply_277962" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>112</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>112</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="600" name="Constant_277967_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 672, 1, 1" offset="2539448" size="1344" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="601" name="Constant_277967" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="602" name="__module.features.6.0.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1233_1">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="603" name="__module.features.6.0.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1233,input.413">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
</output>
</layer>
<layer id="604" name="Multiply_278294_compressed" type="Const" version="opset1">
<data element_type="f16" shape="672, 1, 1, 5, 5" offset="2540792" size="33600" />
<output>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="605" name="Multiply_278294" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="606" name="Multiply_277969" type="GroupConvolution" version="opset1">
<data strides="2, 2" pads_begin="2, 2" pads_end="2, 2" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>15</dim>
<dim>15</dim>
</port>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1243,input.417">
<dim>1</dim>
<dim>672</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="607" name="Constant_277974_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 672, 1, 1" offset="2574392" size="1344" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="608" name="Constant_277974" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="609" name="__module.features.6.0.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1248_1">
<dim>1</dim>
<dim>672</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="610" name="__module.features.6.0.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1248,input.419">
<dim>1</dim>
<dim>672</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="611" name="Constant_272808" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="612" name="__module.features.6.0.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1256,input.423">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="613" name="self.features.6.0.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="28, 672, 1, 1" offset="2575736" size="37632" />
<output>
<port id="0" precision="FP16" names="self.features.6.0.block.2.fc1.weight">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="614" name="self.features.6.0.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="615" name="__module.features.6.0.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>28</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="616" name="__module.features.6.0.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 28, 1, 1" offset="2613368" size="56" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="617" name="__module.features.6.0.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="618" name="__module.features.6.0.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1263_1">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="619" name="__module.features.6.0.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1263,input.425">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="620" name="self.features.6.0.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="672, 28, 1, 1" offset="2613424" size="37632" />
<output>
<port id="0" precision="FP16" names="self.features.6.0.block.2.fc2.weight">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="621" name="self.features.6.0.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="622" name="__module.features.6.0.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>672</dim>
<dim>28</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="623" name="__module.features.6.0.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 672, 1, 1" offset="2651056" size="1344" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="624" name="__module.features.6.0.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="625" name="__module.features.6.0.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1271,input.429">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="626" name="__module.features.6.0.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1272,scale.33">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="627" name="__module.features.6.0.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1273,input.431">
<dim>1</dim>
<dim>672</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="628" name="Multiply_278297_compressed" type="Const" version="opset1">
<data element_type="f16" shape="192, 672, 1, 1" offset="2652400" size="258048" />
<output>
<port id="0" precision="FP16">
<dim>192</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="629" name="Multiply_278297" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>192</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>192</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="630" name="Multiply_277976" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>672</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>192</dim>
<dim>672</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="631" name="Constant_277981_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 192, 1, 1" offset="2910448" size="384" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="632" name="Constant_277981" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="633" name="__module.features.6.0.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1286,input.435">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
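<!-- Annotation: layers 634-671 form __module.features.6.1, an MBConv block at 8x8 resolution: expansion 192->1152 (layer 636), 5x5 depthwise convolution with stride 1 (layer 643), squeeze-and-excitation with a 1152->48->1152 bottleneck, projection back to 192 (layer 667), and a residual Add (layer 671). -->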
<layer id="634" name="Multiply_278301_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 192, 1, 1" offset="2910832" size="442368" />
<output>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="635" name="Multiply_278301" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="636" name="Multiply_277983" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="637" name="Constant_277988_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="3353200" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="638" name="Constant_277988" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="639" name="__module.features.6.1.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1306_1">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="640" name="__module.features.6.1.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1306,input.439">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="641" name="Multiply_278305_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 1, 1, 5, 5" offset="3355504" size="57600" />
<output>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="642" name="Multiply_278305" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="643" name="Multiply_277990" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="2, 2" pads_end="2, 2" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1316,input.443">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="644" name="Constant_277995_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="3413104" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="645" name="Constant_277995" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="646" name="__module.features.6.1.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1321_1">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="647" name="__module.features.6.1.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1321,input.445">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="648" name="Constant_272816" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="649" name="__module.features.6.1.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1329,input.449">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="650" name="self.features.6.1.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="48, 1152, 1, 1" offset="3415408" size="110592" />
<output>
<port id="0" precision="FP16" names="self.features.6.1.block.2.fc1.weight">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="651" name="self.features.6.1.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="652" name="__module.features.6.1.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="653" name="__module.features.6.1.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 48, 1, 1" offset="3526000" size="96" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="654" name="__module.features.6.1.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="655" name="__module.features.6.1.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1336_1">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="656" name="__module.features.6.1.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1336,input.451">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="657" name="self.features.6.1.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 48, 1, 1" offset="3526096" size="110592" />
<output>
<port id="0" precision="FP16" names="self.features.6.1.block.2.fc2.weight">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="658" name="self.features.6.1.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="659" name="__module.features.6.1.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="660" name="__module.features.6.1.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="3636688" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="661" name="__module.features.6.1.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="662" name="__module.features.6.1.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1344,input.455">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="663" name="__module.features.6.1.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1345,scale.35">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="664" name="__module.features.6.1.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1346,input.457">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="665" name="Multiply_278308_compressed" type="Const" version="opset1">
<data element_type="f16" shape="192, 1152, 1, 1" offset="3638992" size="442368" />
<output>
<port id="0" precision="FP16">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="666" name="Multiply_278308" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="667" name="Multiply_277997" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="668" name="Constant_278002_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 192, 1, 1" offset="4081360" size="384" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="669" name="Constant_278002" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="670" name="__module.features.6.1.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1359_1">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="671" name="__module.features.6.1/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1359,result.23">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
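<!-- Annotation: layers 672-709 form __module.features.6.2, structurally identical to features.6.1; only the layer/tensor names and the weight blob offsets differ. -->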
<layer id="672" name="Multiply_278312_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 192, 1, 1" offset="4081744" size="442368" />
<output>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="673" name="Multiply_278312" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="674" name="Multiply_278004" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="675" name="Constant_278009_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="4524112" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="676" name="Constant_278009" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="677" name="__module.features.6.2.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1380_1">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="678" name="__module.features.6.2.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1380,input.465">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="679" name="Multiply_278316_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 1, 1, 5, 5" offset="4526416" size="57600" />
<output>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="680" name="Multiply_278316" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="681" name="Multiply_278011" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="2, 2" pads_end="2, 2" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1390,input.469">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="682" name="Constant_278016_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="4584016" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="683" name="Constant_278016" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="684" name="__module.features.6.2.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1395_1">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="685" name="__module.features.6.2.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1395,input.471">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="686" name="Constant_272826" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="687" name="__module.features.6.2.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1403,input.475">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="688" name="self.features.6.2.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="48, 1152, 1, 1" offset="4586320" size="110592" />
<output>
<port id="0" precision="FP16" names="self.features.6.2.block.2.fc1.weight">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="689" name="self.features.6.2.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="690" name="__module.features.6.2.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="691" name="__module.features.6.2.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 48, 1, 1" offset="4696912" size="96" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="692" name="__module.features.6.2.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="693" name="__module.features.6.2.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1410_1">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="694" name="__module.features.6.2.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1410,input.477">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="695" name="self.features.6.2.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 48, 1, 1" offset="4697008" size="110592" />
<output>
<port id="0" precision="FP16" names="self.features.6.2.block.2.fc2.weight">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="696" name="self.features.6.2.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="697" name="__module.features.6.2.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="698" name="__module.features.6.2.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="4807600" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="699" name="__module.features.6.2.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="700" name="__module.features.6.2.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1418,input.481">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="701" name="__module.features.6.2.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1419,scale.37">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="702" name="__module.features.6.2.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1420,input.483">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="703" name="Multiply_278319_compressed" type="Const" version="opset1">
<data element_type="f16" shape="192, 1152, 1, 1" offset="4809904" size="442368" />
<output>
<port id="0" precision="FP16">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="704" name="Multiply_278319" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="705" name="Multiply_278018" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="706" name="Constant_278023_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 192, 1, 1" offset="5252272" size="384" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="707" name="Constant_278023" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="708" name="__module.features.6.2.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1433_1">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="709" name="__module.features.6.2/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1433,result.25">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
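	<!-- features.6.3: MBConv block with the same shape as 6.2 - 1x1 expansion 192->1152 + SiLU,
	     5x5 depthwise GroupConvolution (pads 2,2) + SiLU, SE gating squeezing to 48 channels,
	     1x1 projection back to 192, and a residual add. -->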
<layer id="710" name="Multiply_278323_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 192, 1, 1" offset="5252656" size="442368" />
<output>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="711" name="Multiply_278323" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="712" name="Multiply_278025" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="713" name="Constant_278030_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="5695024" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="714" name="Constant_278030" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="715" name="__module.features.6.3.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1454_1">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="716" name="__module.features.6.3.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1454,input.491">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="717" name="Multiply_278327_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 1, 1, 5, 5" offset="5697328" size="57600" />
<output>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="718" name="Multiply_278327" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="719" name="Multiply_278032" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="2, 2" pads_end="2, 2" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1464,input.495">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="720" name="Constant_278037_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="5754928" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="721" name="Constant_278037" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="722" name="__module.features.6.3.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1469_1">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="723" name="__module.features.6.3.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1469,input.497">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
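	<!-- Squeeze-and-Excitation for features.6.3: ReduceMean over two axes (the shared I64
	     constant at bin offset 2432, standing in for adaptive_avg_pool2d(1)), then
	     fc1 1152->48 + SiLU, fc2 48->1152 + Sigmoid, and a broadcast Multiply that rescales
	     the 8x8 feature map per channel. -->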
<layer id="724" name="Constant_272836" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="725" name="__module.features.6.3.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1477,input.501">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="726" name="self.features.6.3.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="48, 1152, 1, 1" offset="5757232" size="110592" />
<output>
<port id="0" precision="FP16" names="self.features.6.3.block.2.fc1.weight">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="727" name="self.features.6.3.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="728" name="__module.features.6.3.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="729" name="__module.features.6.3.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 48, 1, 1" offset="5867824" size="96" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="730" name="__module.features.6.3.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="731" name="__module.features.6.3.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1484_1">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="732" name="__module.features.6.3.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1484,input.503">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="733" name="self.features.6.3.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 48, 1, 1" offset="5867920" size="110592" />
<output>
<port id="0" precision="FP16" names="self.features.6.3.block.2.fc2.weight">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="734" name="self.features.6.3.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="735" name="__module.features.6.3.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="736" name="__module.features.6.3.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="5978512" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="737" name="__module.features.6.3.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="738" name="__module.features.6.3.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1492,input.507">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="739" name="__module.features.6.3.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1493,scale.39">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="740" name="__module.features.6.3.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1494,input.509">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="741" name="Multiply_278330_compressed" type="Const" version="opset1">
<data element_type="f16" shape="192, 1152, 1, 1" offset="5980816" size="442368" />
<output>
<port id="0" precision="FP16">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="742" name="Multiply_278330" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="743" name="Multiply_278039" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="744" name="Constant_278044_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 192, 1, 1" offset="6423184" size="384" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="745" name="Constant_278044" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="746" name="__module.features.6.3.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1507_1">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="747" name="__module.features.6.3/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1507,result.27">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
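	<!-- features.6.4: third repeat of the 192-channel MBConv block (expand to 1152,
	     5x5 depthwise, SE, project to 192, residual add). -->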
<layer id="748" name="Multiply_278334_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 192, 1, 1" offset="6423568" size="442368" />
<output>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="749" name="Multiply_278334" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="750" name="Multiply_278046" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="751" name="Constant_278051_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="6865936" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="752" name="Constant_278051" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="753" name="__module.features.6.4.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1528_1">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="754" name="__module.features.6.4.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1528,input.517">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="755" name="Multiply_278338_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 1, 1, 5, 5" offset="6868240" size="57600" />
<output>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="756" name="Multiply_278338" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="757" name="Multiply_278053" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="2, 2" pads_end="2, 2" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1538,input.521">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="758" name="Constant_278058_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="6925840" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="759" name="Constant_278058" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="760" name="__module.features.6.4.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1543_1">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="761" name="__module.features.6.4.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1543,input.523">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="762" name="Constant_272846" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="763" name="__module.features.6.4.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1551,input.527">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="764" name="self.features.6.4.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="48, 1152, 1, 1" offset="6928144" size="110592" />
<output>
<port id="0" precision="FP16" names="self.features.6.4.block.2.fc1.weight">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="765" name="self.features.6.4.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="766" name="__module.features.6.4.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="767" name="__module.features.6.4.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 48, 1, 1" offset="7038736" size="96" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="768" name="__module.features.6.4.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="769" name="__module.features.6.4.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1558_1">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="770" name="__module.features.6.4.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1558,input.529">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="771" name="self.features.6.4.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 48, 1, 1" offset="7038832" size="110592" />
<output>
<port id="0" precision="FP16" names="self.features.6.4.block.2.fc2.weight">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="772" name="self.features.6.4.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="773" name="__module.features.6.4.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="774" name="__module.features.6.4.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="7149424" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="775" name="__module.features.6.4.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="776" name="__module.features.6.4.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1566,input.533">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="777" name="__module.features.6.4.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1567,scale.41">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="778" name="__module.features.6.4.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1568,input.535">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="779" name="Multiply_278341_compressed" type="Const" version="opset1">
<data element_type="f16" shape="192, 1152, 1, 1" offset="7151728" size="442368" />
<output>
<port id="0" precision="FP16">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="780" name="Multiply_278341" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="781" name="Multiply_278060" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>192</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="782" name="Constant_278065_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 192, 1, 1" offset="7594096" size="384" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="783" name="Constant_278065" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="784" name="__module.features.6.4.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1581_1">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="785" name="__module.features.6.4/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1581,result.29">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
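	<!-- features.7.0: last-stage MBConv - 1x1 expansion 192->1152 + SiLU, 3x3 depthwise
	     GroupConvolution (pads 1,1) + SiLU, SE, then 1x1 projection to 320. No residual add:
	     the channel count changes from 192 to 320. -->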
<layer id="786" name="Multiply_278345_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 192, 1, 1" offset="7594480" size="442368" />
<output>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="787" name="Multiply_278345" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="788" name="Multiply_278067" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>192</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>192</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="789" name="Constant_278072_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="8036848" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="790" name="Constant_278072" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="791" name="__module.features.7.0.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1603_1">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="792" name="__module.features.7.0.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1603,input.543">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="793" name="Multiply_278349_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 1, 1, 3, 3" offset="8039152" size="20736" />
<output>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="794" name="Multiply_278349" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="795" name="Multiply_278074" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="1, 1" pads_end="1, 1" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1613,input.547">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="796" name="Constant_278079_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="8059888" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="797" name="Constant_278079" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="798" name="__module.features.7.0.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1618_1">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="799" name="__module.features.7.0.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1618,input.549">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="800" name="Constant_272856" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="801" name="__module.features.7.0.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1626,input.553">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="802" name="self.features.7.0.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="48, 1152, 1, 1" offset="8062192" size="110592" />
<output>
<port id="0" precision="FP16" names="self.features.7.0.block.2.fc1.weight">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="803" name="self.features.7.0.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="804" name="__module.features.7.0.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>48</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="805" name="__module.features.7.0.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 48, 1, 1" offset="8172784" size="96" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="806" name="__module.features.7.0.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="807" name="__module.features.7.0.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1633_1">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="808" name="__module.features.7.0.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1633,input.555">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="809" name="self.features.7.0.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1152, 48, 1, 1" offset="8172880" size="110592" />
<output>
<port id="0" precision="FP16" names="self.features.7.0.block.2.fc2.weight">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="810" name="self.features.7.0.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="811" name="__module.features.7.0.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1152</dim>
<dim>48</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="812" name="__module.features.7.0.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1152, 1, 1" offset="8283472" size="2304" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="813" name="__module.features.7.0.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="814" name="__module.features.7.0.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1641,input.559">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="815" name="__module.features.7.0.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1642,scale.43">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="816" name="__module.features.7.0.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1643,input.561">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="817" name="Multiply_278352_compressed" type="Const" version="opset1">
<data element_type="f16" shape="320, 1152, 1, 1" offset="8285776" size="737280" />
<output>
<port id="0" precision="FP16">
<dim>320</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="818" name="Multiply_278352" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>320</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>320</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="819" name="Multiply_278081" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1152</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>320</dim>
<dim>1152</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>320</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="820" name="Constant_278086_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 320, 1, 1" offset="9023056" size="640" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="821" name="Constant_278086" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="822" name="__module.features.7.0.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>320</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1656,input.565">
<dim>1</dim>
<dim>320</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
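	<!-- features.7.1: MBConv at the 320-channel stage - 1x1 expansion 320->1920 + SiLU,
	     3x3 depthwise GroupConvolution, and an SE module squeezing to 80 channels
	     (fc1 1920->80, fc2 80->1920). -->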
<layer id="823" name="Multiply_278356_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1920, 320, 1, 1" offset="9023696" size="1228800" />
<output>
<port id="0" precision="FP16">
<dim>1920</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="824" name="Multiply_278356" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1920</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1920</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="825" name="Multiply_278088" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>320</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1920</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="826" name="Constant_278093_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1920, 1, 1" offset="10252496" size="3840" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="827" name="Constant_278093" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="828" name="__module.features.7.1.block.0.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1676_1">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="829" name="__module.features.7.1.block.0.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1676,input.569">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="830" name="Multiply_278360_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1920, 1, 1, 3, 3" offset="10256336" size="34560" />
<output>
<port id="0" precision="FP16">
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="831" name="Multiply_278360" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="832" name="Multiply_278095" type="GroupConvolution" version="opset1">
<data strides="1, 1" pads_begin="1, 1" pads_end="1, 1" dilations="1, 1" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1686,input.573">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="833" name="Constant_278100_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1920, 1, 1" offset="10290896" size="3840" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="834" name="Constant_278100" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="835" name="__module.features.7.1.block.1.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1691_1">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="836" name="__module.features.7.1.block.1.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1691,input.575">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="837" name="Constant_272864" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="838" name="__module.features.7.1.block.2.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1699,input.579">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="839" name="self.features.7.1.block.2.fc1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="80, 1920, 1, 1" offset="10294736" size="307200" />
<output>
<port id="0" precision="FP16" names="self.features.7.1.block.2.fc1.weight">
<dim>80</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="840" name="self.features.7.1.block.2.fc1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>80</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>80</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="841" name="__module.features.7.1.block.2.fc1/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>80</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="842" name="__module.features.7.1.block.2.fc1/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 80, 1, 1" offset="10601936" size="160" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="843" name="__module.features.7.1.block.2.fc1/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="844" name="__module.features.7.1.block.2.fc1/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1706_1">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="845" name="__module.features.7.1.block.2.activation/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1706,input.581">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="846" name="self.features.7.1.block.2.fc2.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1920, 80, 1, 1" offset="10602096" size="307200" />
<output>
<port id="0" precision="FP16" names="self.features.7.1.block.2.fc2.weight">
<dim>1920</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="847" name="self.features.7.1.block.2.fc2.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1920</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1920</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="848" name="__module.features.7.1.block.2.fc2/aten::_convolution/Convolution" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1920</dim>
<dim>80</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="849" name="__module.features.7.1.block.2.fc2/aten::_convolution/Reshape_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1920, 1, 1" offset="10909296" size="3840" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="850" name="__module.features.7.1.block.2.fc2/aten::_convolution/Reshape" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="851" name="__module.features.7.1.block.2.fc2/aten::_convolution/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1714,input.585">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="852" name="__module.features.7.1.block.2.scale_activation/aten::sigmoid/Sigmoid" type="Sigmoid" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1715,scale">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="853" name="__module.features.7.1.block.2/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1716,input.587">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="854" name="Multiply_278363_compressed" type="Const" version="opset1">
<data element_type="f16" shape="320, 1920, 1, 1" offset="10913136" size="1228800" />
<output>
<port id="0" precision="FP16">
<dim>320</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="855" name="Multiply_278363" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>320</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>320</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="856" name="Multiply_278102" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1920</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>320</dim>
<dim>1920</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>320</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="857" name="Constant_278107_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 320, 1, 1" offset="12141936" size="640" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="858" name="Constant_278107" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="859" name="__module.features.7.1.block.3.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>320</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1729_1">
<dim>1</dim>
<dim>320</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="860" name="__module.features.7.1/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>320</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>320</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1729,result">
<dim>1</dim>
<dim>320</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="861" name="Multiply_278367_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1280, 320, 1, 1" offset="12142576" size="819200" />
<output>
<port id="0" precision="FP16">
<dim>1280</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="862" name="Multiply_278367" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1280</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1280</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="863" name="Multiply_278109" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>320</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1280</dim>
<dim>320</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1280</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="864" name="Constant_278114_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1280, 1, 1" offset="12961776" size="2560" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1280</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="865" name="Constant_278114" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1280</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1280</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="866" name="__module.features.8.1/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1280</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1280</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1744_1">
<dim>1</dim>
<dim>1280</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="867" name="__module.features.8.2/aten::silu_/Swish" type="Swish" version="opset4">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1280</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1744,input.595">
<dim>1</dim>
<dim>1280</dim>
<dim>8</dim>
<dim>8</dim>
</port>
</output>
</layer>
<layer id="868" name="Constant_272874" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="2432" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="869" name="__module.avgpool/aten::adaptive_avg_pool2d/AdaptiveAvgPool" type="ReduceMean" version="opset1">
<data keep_dims="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1280</dim>
<dim>8</dim>
<dim>8</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1748,x_1">
<dim>1</dim>
<dim>1280</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="870" name="Constant_279180" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="12964336" size="8" />
<rt_info>
<attribute name="precise" version="0" />
</rt_info>
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="871" name="ShapeOf_279176" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="12964344" size="16" />
<rt_info>
<attribute name="precise" version="0" />
</rt_info>
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="872" name="Constant_279177" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="12964336" size="8" />
<rt_info>
<attribute name="precise" version="0" />
</rt_info>
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="873" name="Constant_279178" type="Const" version="opset1">
<data element_type="i64" shape="" offset="12964360" size="8" />
<rt_info>
<attribute name="precise" version="0" />
</rt_info>
<output>
<port id="0" precision="I64" />
</output>
</layer>
<layer id="874" name="Gather_279179" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="I64">
<dim>2</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
<port id="2" precision="I64" />
</input>
<output>
<port id="3" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="875" name="Concat_279181" type="Concat" version="opset1">
<data axis="0" />
<input>
<port id="0" precision="I64">
<dim>1</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="876" name="aten::flatten/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1280</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="9,input.599">
<dim>1</dim>
<dim>1280</dim>
</port>
</output>
</layer>
<layer id="877" name="self.classifier.1.weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1000, 1280" offset="12964368" size="2560000" />
<output>
<port id="0" precision="FP16" names="self.classifier.1.weight">
<dim>1000</dim>
<dim>1280</dim>
</port>
</output>
</layer>
<layer id="878" name="self.classifier.1.weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1000</dim>
<dim>1280</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1000</dim>
<dim>1280</dim>
</port>
</output>
</layer>
<layer id="879" name="__module.classifier.1/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1280</dim>
</port>
<port id="1" precision="FP32">
<dim>1000</dim>
<dim>1280</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1000</dim>
</port>
</output>
</layer>
<layer id="880" name="Constant_278705_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 1000" offset="15524368" size="2000" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1000</dim>
</port>
</output>
</layer>
<layer id="881" name="Constant_278705" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>1000</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1000</dim>
</port>
</output>
</layer>
<layer id="882" name="__module.classifier.1/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1000</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1000</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="logits">
<dim>1</dim>
<dim>1000</dim>
</port>
</output>
</layer>
<layer id="883" name="Result_264831" type="Result" version="opset1" output_names="logits">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1000</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="3" to-port="0" />
<edge from-layer="1" from-port="0" to-layer="2" to-port="0" />
<edge from-layer="2" from-port="1" to-layer="3" to-port="1" />
<edge from-layer="3" from-port="2" to-layer="6" to-port="0" />
<edge from-layer="4" from-port="0" to-layer="5" to-port="0" />
<edge from-layer="5" from-port="1" to-layer="6" to-port="1" />
<edge from-layer="6" from-port="2" to-layer="7" to-port="0" />
<edge from-layer="7" from-port="1" to-layer="10" to-port="0" />
<edge from-layer="8" from-port="0" to-layer="9" to-port="0" />
<edge from-layer="9" from-port="1" to-layer="10" to-port="1" />
<edge from-layer="10" from-port="2" to-layer="13" to-port="0" />
<edge from-layer="11" from-port="0" to-layer="12" to-port="0" />
<edge from-layer="12" from-port="1" to-layer="13" to-port="1" />
<edge from-layer="13" from-port="2" to-layer="14" to-port="0" />
<edge from-layer="14" from-port="1" to-layer="16" to-port="0" />
<edge from-layer="14" from-port="1" to-layer="31" to-port="1" />
<edge from-layer="15" from-port="0" to-layer="16" to-port="1" />
<edge from-layer="16" from-port="2" to-layer="19" to-port="0" />
<edge from-layer="17" from-port="0" to-layer="18" to-port="0" />
<edge from-layer="18" from-port="1" to-layer="19" to-port="1" />
<edge from-layer="19" from-port="2" to-layer="22" to-port="0" />
<edge from-layer="20" from-port="0" to-layer="21" to-port="0" />
<edge from-layer="21" from-port="1" to-layer="22" to-port="1" />
<edge from-layer="22" from-port="2" to-layer="23" to-port="0" />
<edge from-layer="23" from-port="1" to-layer="26" to-port="0" />
<edge from-layer="24" from-port="0" to-layer="25" to-port="0" />
<edge from-layer="25" from-port="1" to-layer="26" to-port="1" />
<edge from-layer="26" from-port="2" to-layer="29" to-port="0" />
<edge from-layer="27" from-port="0" to-layer="28" to-port="0" />
<edge from-layer="28" from-port="1" to-layer="29" to-port="1" />
<edge from-layer="29" from-port="2" to-layer="30" to-port="0" />
<edge from-layer="30" from-port="1" to-layer="31" to-port="0" />
<edge from-layer="31" from-port="2" to-layer="34" to-port="0" />
<edge from-layer="32" from-port="0" to-layer="33" to-port="0" />
<edge from-layer="33" from-port="1" to-layer="34" to-port="1" />
<edge from-layer="34" from-port="2" to-layer="37" to-port="0" />
<edge from-layer="35" from-port="0" to-layer="36" to-port="0" />
<edge from-layer="36" from-port="1" to-layer="37" to-port="1" />
<edge from-layer="37" from-port="2" to-layer="68" to-port="1" />
<edge from-layer="37" from-port="2" to-layer="40" to-port="0" />
<edge from-layer="38" from-port="0" to-layer="39" to-port="0" />
<edge from-layer="39" from-port="1" to-layer="40" to-port="1" />
<edge from-layer="40" from-port="2" to-layer="43" to-port="0" />
<edge from-layer="41" from-port="0" to-layer="42" to-port="0" />
<edge from-layer="42" from-port="1" to-layer="43" to-port="1" />
<edge from-layer="43" from-port="2" to-layer="44" to-port="0" />
<edge from-layer="44" from-port="1" to-layer="46" to-port="0" />
<edge from-layer="44" from-port="1" to-layer="61" to-port="1" />
<edge from-layer="45" from-port="0" to-layer="46" to-port="1" />
<edge from-layer="46" from-port="2" to-layer="49" to-port="0" />
<edge from-layer="47" from-port="0" to-layer="48" to-port="0" />
<edge from-layer="48" from-port="1" to-layer="49" to-port="1" />
<edge from-layer="49" from-port="2" to-layer="52" to-port="0" />
<edge from-layer="50" from-port="0" to-layer="51" to-port="0" />
<edge from-layer="51" from-port="1" to-layer="52" to-port="1" />
<edge from-layer="52" from-port="2" to-layer="53" to-port="0" />
<edge from-layer="53" from-port="1" to-layer="56" to-port="0" />
<edge from-layer="54" from-port="0" to-layer="55" to-port="0" />
<edge from-layer="55" from-port="1" to-layer="56" to-port="1" />
<edge from-layer="56" from-port="2" to-layer="59" to-port="0" />
<edge from-layer="57" from-port="0" to-layer="58" to-port="0" />
<edge from-layer="58" from-port="1" to-layer="59" to-port="1" />
<edge from-layer="59" from-port="2" to-layer="60" to-port="0" />
<edge from-layer="60" from-port="1" to-layer="61" to-port="0" />
<edge from-layer="61" from-port="2" to-layer="64" to-port="0" />
<edge from-layer="62" from-port="0" to-layer="63" to-port="0" />
<edge from-layer="63" from-port="1" to-layer="64" to-port="1" />
<edge from-layer="64" from-port="2" to-layer="67" to-port="0" />
<edge from-layer="65" from-port="0" to-layer="66" to-port="0" />
<edge from-layer="66" from-port="1" to-layer="67" to-port="1" />
<edge from-layer="67" from-port="2" to-layer="68" to-port="0" />
<edge from-layer="68" from-port="2" to-layer="71" to-port="0" />
<edge from-layer="69" from-port="0" to-layer="70" to-port="0" />
<edge from-layer="70" from-port="1" to-layer="71" to-port="1" />
<edge from-layer="71" from-port="2" to-layer="74" to-port="0" />
<edge from-layer="72" from-port="0" to-layer="73" to-port="0" />
<edge from-layer="73" from-port="1" to-layer="74" to-port="1" />
<edge from-layer="74" from-port="2" to-layer="75" to-port="0" />
<edge from-layer="75" from-port="1" to-layer="78" to-port="0" />
<edge from-layer="76" from-port="0" to-layer="77" to-port="0" />
<edge from-layer="77" from-port="1" to-layer="78" to-port="1" />
<edge from-layer="78" from-port="2" to-layer="81" to-port="0" />
<edge from-layer="79" from-port="0" to-layer="80" to-port="0" />
<edge from-layer="80" from-port="1" to-layer="81" to-port="1" />
<edge from-layer="81" from-port="2" to-layer="82" to-port="0" />
<edge from-layer="82" from-port="1" to-layer="84" to-port="0" />
<edge from-layer="82" from-port="1" to-layer="99" to-port="1" />
<edge from-layer="83" from-port="0" to-layer="84" to-port="1" />
<edge from-layer="84" from-port="2" to-layer="87" to-port="0" />
<edge from-layer="85" from-port="0" to-layer="86" to-port="0" />
<edge from-layer="86" from-port="1" to-layer="87" to-port="1" />
<edge from-layer="87" from-port="2" to-layer="90" to-port="0" />
<edge from-layer="88" from-port="0" to-layer="89" to-port="0" />
<edge from-layer="89" from-port="1" to-layer="90" to-port="1" />
<edge from-layer="90" from-port="2" to-layer="91" to-port="0" />
<edge from-layer="91" from-port="1" to-layer="94" to-port="0" />
<edge from-layer="92" from-port="0" to-layer="93" to-port="0" />
<edge from-layer="93" from-port="1" to-layer="94" to-port="1" />
<edge from-layer="94" from-port="2" to-layer="97" to-port="0" />
<edge from-layer="95" from-port="0" to-layer="96" to-port="0" />
<edge from-layer="96" from-port="1" to-layer="97" to-port="1" />
<edge from-layer="97" from-port="2" to-layer="98" to-port="0" />
<edge from-layer="98" from-port="1" to-layer="99" to-port="0" />
<edge from-layer="99" from-port="2" to-layer="102" to-port="0" />
<edge from-layer="100" from-port="0" to-layer="101" to-port="0" />
<edge from-layer="101" from-port="1" to-layer="102" to-port="1" />
<edge from-layer="102" from-port="2" to-layer="105" to-port="0" />
<edge from-layer="103" from-port="0" to-layer="104" to-port="0" />
<edge from-layer="104" from-port="1" to-layer="105" to-port="1" />
<edge from-layer="105" from-port="2" to-layer="108" to-port="0" />
<edge from-layer="105" from-port="2" to-layer="143" to-port="1" />
<edge from-layer="106" from-port="0" to-layer="107" to-port="0" />
<edge from-layer="107" from-port="1" to-layer="108" to-port="1" />
<edge from-layer="108" from-port="2" to-layer="111" to-port="0" />
<edge from-layer="109" from-port="0" to-layer="110" to-port="0" />
<edge from-layer="110" from-port="1" to-layer="111" to-port="1" />
<edge from-layer="111" from-port="2" to-layer="112" to-port="0" />
<edge from-layer="112" from-port="1" to-layer="115" to-port="0" />
<edge from-layer="113" from-port="0" to-layer="114" to-port="0" />
<edge from-layer="114" from-port="1" to-layer="115" to-port="1" />
<edge from-layer="115" from-port="2" to-layer="118" to-port="0" />
<edge from-layer="116" from-port="0" to-layer="117" to-port="0" />
<edge from-layer="117" from-port="1" to-layer="118" to-port="1" />
<edge from-layer="118" from-port="2" to-layer="119" to-port="0" />
<edge from-layer="119" from-port="1" to-layer="136" to-port="1" />
<edge from-layer="119" from-port="1" to-layer="121" to-port="0" />
<edge from-layer="120" from-port="0" to-layer="121" to-port="1" />
<edge from-layer="121" from-port="2" to-layer="124" to-port="0" />
<edge from-layer="122" from-port="0" to-layer="123" to-port="0" />
<edge from-layer="123" from-port="1" to-layer="124" to-port="1" />
<edge from-layer="124" from-port="2" to-layer="127" to-port="0" />
<edge from-layer="125" from-port="0" to-layer="126" to-port="0" />
<edge from-layer="126" from-port="1" to-layer="127" to-port="1" />
<edge from-layer="127" from-port="2" to-layer="128" to-port="0" />
<edge from-layer="128" from-port="1" to-layer="131" to-port="0" />
<edge from-layer="129" from-port="0" to-layer="130" to-port="0" />
<edge from-layer="130" from-port="1" to-layer="131" to-port="1" />
<edge from-layer="131" from-port="2" to-layer="134" to-port="0" />
<edge from-layer="132" from-port="0" to-layer="133" to-port="0" />
<edge from-layer="133" from-port="1" to-layer="134" to-port="1" />
<edge from-layer="134" from-port="2" to-layer="135" to-port="0" />
<edge from-layer="135" from-port="1" to-layer="136" to-port="0" />
<edge from-layer="136" from-port="2" to-layer="139" to-port="0" />
<edge from-layer="137" from-port="0" to-layer="138" to-port="0" />
<edge from-layer="138" from-port="1" to-layer="139" to-port="1" />
<edge from-layer="139" from-port="2" to-layer="142" to-port="0" />
<edge from-layer="140" from-port="0" to-layer="141" to-port="0" />
<edge from-layer="141" from-port="1" to-layer="142" to-port="1" />
<edge from-layer="142" from-port="2" to-layer="143" to-port="0" />
<edge from-layer="143" from-port="2" to-layer="181" to-port="1" />
<edge from-layer="143" from-port="2" to-layer="146" to-port="0" />
<edge from-layer="144" from-port="0" to-layer="145" to-port="0" />
<edge from-layer="145" from-port="1" to-layer="146" to-port="1" />
<edge from-layer="146" from-port="2" to-layer="149" to-port="0" />
<edge from-layer="147" from-port="0" to-layer="148" to-port="0" />
<edge from-layer="148" from-port="1" to-layer="149" to-port="1" />
<edge from-layer="149" from-port="2" to-layer="150" to-port="0" />
<edge from-layer="150" from-port="1" to-layer="153" to-port="0" />
<edge from-layer="151" from-port="0" to-layer="152" to-port="0" />
<edge from-layer="152" from-port="1" to-layer="153" to-port="1" />
<edge from-layer="153" from-port="2" to-layer="156" to-port="0" />
<edge from-layer="154" from-port="0" to-layer="155" to-port="0" />
<edge from-layer="155" from-port="1" to-layer="156" to-port="1" />
<edge from-layer="156" from-port="2" to-layer="157" to-port="0" />
<edge from-layer="157" from-port="1" to-layer="174" to-port="1" />
<edge from-layer="157" from-port="1" to-layer="159" to-port="0" />
<edge from-layer="158" from-port="0" to-layer="159" to-port="1" />
<edge from-layer="159" from-port="2" to-layer="162" to-port="0" />
<edge from-layer="160" from-port="0" to-layer="161" to-port="0" />
<edge from-layer="161" from-port="1" to-layer="162" to-port="1" />
<edge from-layer="162" from-port="2" to-layer="165" to-port="0" />
<edge from-layer="163" from-port="0" to-layer="164" to-port="0" />
<edge from-layer="164" from-port="1" to-layer="165" to-port="1" />
<edge from-layer="165" from-port="2" to-layer="166" to-port="0" />
<edge from-layer="166" from-port="1" to-layer="169" to-port="0" />
<edge from-layer="167" from-port="0" to-layer="168" to-port="0" />
<edge from-layer="168" from-port="1" to-layer="169" to-port="1" />
<edge from-layer="169" from-port="2" to-layer="172" to-port="0" />
<edge from-layer="170" from-port="0" to-layer="171" to-port="0" />
<edge from-layer="171" from-port="1" to-layer="172" to-port="1" />
<edge from-layer="172" from-port="2" to-layer="173" to-port="0" />
<edge from-layer="173" from-port="1" to-layer="174" to-port="0" />
<edge from-layer="174" from-port="2" to-layer="177" to-port="0" />
<edge from-layer="175" from-port="0" to-layer="176" to-port="0" />
<edge from-layer="176" from-port="1" to-layer="177" to-port="1" />
<edge from-layer="177" from-port="2" to-layer="180" to-port="0" />
<edge from-layer="178" from-port="0" to-layer="179" to-port="0" />
<edge from-layer="179" from-port="1" to-layer="180" to-port="1" />
<edge from-layer="180" from-port="2" to-layer="181" to-port="0" />
<edge from-layer="181" from-port="2" to-layer="184" to-port="0" />
<edge from-layer="182" from-port="0" to-layer="183" to-port="0" />
<edge from-layer="183" from-port="1" to-layer="184" to-port="1" />
<edge from-layer="184" from-port="2" to-layer="187" to-port="0" />
<edge from-layer="185" from-port="0" to-layer="186" to-port="0" />
<edge from-layer="186" from-port="1" to-layer="187" to-port="1" />
<edge from-layer="187" from-port="2" to-layer="188" to-port="0" />
<edge from-layer="188" from-port="1" to-layer="191" to-port="0" />
<edge from-layer="189" from-port="0" to-layer="190" to-port="0" />
<edge from-layer="190" from-port="1" to-layer="191" to-port="1" />
<edge from-layer="191" from-port="2" to-layer="194" to-port="0" />
<edge from-layer="192" from-port="0" to-layer="193" to-port="0" />
<edge from-layer="193" from-port="1" to-layer="194" to-port="1" />
<edge from-layer="194" from-port="2" to-layer="195" to-port="0" />
<edge from-layer="195" from-port="1" to-layer="197" to-port="0" />
<edge from-layer="195" from-port="1" to-layer="212" to-port="1" />
<edge from-layer="196" from-port="0" to-layer="197" to-port="1" />
<edge from-layer="197" from-port="2" to-layer="200" to-port="0" />
<edge from-layer="198" from-port="0" to-layer="199" to-port="0" />
<edge from-layer="199" from-port="1" to-layer="200" to-port="1" />
<edge from-layer="200" from-port="2" to-layer="203" to-port="0" />
<edge from-layer="201" from-port="0" to-layer="202" to-port="0" />
<edge from-layer="202" from-port="1" to-layer="203" to-port="1" />
<edge from-layer="203" from-port="2" to-layer="204" to-port="0" />
<edge from-layer="204" from-port="1" to-layer="207" to-port="0" />
<edge from-layer="205" from-port="0" to-layer="206" to-port="0" />
<edge from-layer="206" from-port="1" to-layer="207" to-port="1" />
<edge from-layer="207" from-port="2" to-layer="210" to-port="0" />
<edge from-layer="208" from-port="0" to-layer="209" to-port="0" />
<edge from-layer="209" from-port="1" to-layer="210" to-port="1" />
<edge from-layer="210" from-port="2" to-layer="211" to-port="0" />
<edge from-layer="211" from-port="1" to-layer="212" to-port="0" />
<edge from-layer="212" from-port="2" to-layer="215" to-port="0" />
<edge from-layer="213" from-port="0" to-layer="214" to-port="0" />
<edge from-layer="214" from-port="1" to-layer="215" to-port="1" />
<edge from-layer="215" from-port="2" to-layer="218" to-port="0" />
<edge from-layer="216" from-port="0" to-layer="217" to-port="0" />
<edge from-layer="217" from-port="1" to-layer="218" to-port="1" />
<edge from-layer="218" from-port="2" to-layer="221" to-port="0" />
<edge from-layer="218" from-port="2" to-layer="256" to-port="1" />
<edge from-layer="219" from-port="0" to-layer="220" to-port="0" />
<edge from-layer="220" from-port="1" to-layer="221" to-port="1" />
<edge from-layer="221" from-port="2" to-layer="224" to-port="0" />
<edge from-layer="222" from-port="0" to-layer="223" to-port="0" />
<edge from-layer="223" from-port="1" to-layer="224" to-port="1" />
<edge from-layer="224" from-port="2" to-layer="225" to-port="0" />
<edge from-layer="225" from-port="1" to-layer="228" to-port="0" />
<edge from-layer="226" from-port="0" to-layer="227" to-port="0" />
<edge from-layer="227" from-port="1" to-layer="228" to-port="1" />
<edge from-layer="228" from-port="2" to-layer="231" to-port="0" />
<edge from-layer="229" from-port="0" to-layer="230" to-port="0" />
<edge from-layer="230" from-port="1" to-layer="231" to-port="1" />
<edge from-layer="231" from-port="2" to-layer="232" to-port="0" />
<edge from-layer="232" from-port="1" to-layer="249" to-port="1" />
<edge from-layer="232" from-port="1" to-layer="234" to-port="0" />
<edge from-layer="233" from-port="0" to-layer="234" to-port="1" />
<edge from-layer="234" from-port="2" to-layer="237" to-port="0" />
<edge from-layer="235" from-port="0" to-layer="236" to-port="0" />
<edge from-layer="236" from-port="1" to-layer="237" to-port="1" />
<edge from-layer="237" from-port="2" to-layer="240" to-port="0" />
<edge from-layer="238" from-port="0" to-layer="239" to-port="0" />
<edge from-layer="239" from-port="1" to-layer="240" to-port="1" />
<edge from-layer="240" from-port="2" to-layer="241" to-port="0" />
<edge from-layer="241" from-port="1" to-layer="244" to-port="0" />
<edge from-layer="242" from-port="0" to-layer="243" to-port="0" />
<edge from-layer="243" from-port="1" to-layer="244" to-port="1" />
<edge from-layer="244" from-port="2" to-layer="247" to-port="0" />
<edge from-layer="245" from-port="0" to-layer="246" to-port="0" />
<edge from-layer="246" from-port="1" to-layer="247" to-port="1" />
<edge from-layer="247" from-port="2" to-layer="248" to-port="0" />
<edge from-layer="248" from-port="1" to-layer="249" to-port="0" />
<edge from-layer="249" from-port="2" to-layer="252" to-port="0" />
<edge from-layer="250" from-port="0" to-layer="251" to-port="0" />
<edge from-layer="251" from-port="1" to-layer="252" to-port="1" />
<edge from-layer="252" from-port="2" to-layer="255" to-port="0" />
<edge from-layer="253" from-port="0" to-layer="254" to-port="0" />
<edge from-layer="254" from-port="1" to-layer="255" to-port="1" />
<edge from-layer="255" from-port="2" to-layer="256" to-port="0" />
<edge from-layer="256" from-port="2" to-layer="259" to-port="0" />
<edge from-layer="256" from-port="2" to-layer="294" to-port="1" />
<edge from-layer="257" from-port="0" to-layer="258" to-port="0" />
<edge from-layer="258" from-port="1" to-layer="259" to-port="1" />
<edge from-layer="259" from-port="2" to-layer="262" to-port="0" />
<edge from-layer="260" from-port="0" to-layer="261" to-port="0" />
<edge from-layer="261" from-port="1" to-layer="262" to-port="1" />
<edge from-layer="262" from-port="2" to-layer="263" to-port="0" />
<edge from-layer="263" from-port="1" to-layer="266" to-port="0" />
<edge from-layer="264" from-port="0" to-layer="265" to-port="0" />
<edge from-layer="265" from-port="1" to-layer="266" to-port="1" />
<edge from-layer="266" from-port="2" to-layer="269" to-port="0" />
<edge from-layer="267" from-port="0" to-layer="268" to-port="0" />
<edge from-layer="268" from-port="1" to-layer="269" to-port="1" />
<edge from-layer="269" from-port="2" to-layer="270" to-port="0" />
<edge from-layer="270" from-port="1" to-layer="272" to-port="0" />
<edge from-layer="270" from-port="1" to-layer="287" to-port="1" />
<edge from-layer="271" from-port="0" to-layer="272" to-port="1" />
<edge from-layer="272" from-port="2" to-layer="275" to-port="0" />
<edge from-layer="273" from-port="0" to-layer="274" to-port="0" />
<edge from-layer="274" from-port="1" to-layer="275" to-port="1" />
<edge from-layer="275" from-port="2" to-layer="278" to-port="0" />
<edge from-layer="276" from-port="0" to-layer="277" to-port="0" />
<edge from-layer="277" from-port="1" to-layer="278" to-port="1" />
<edge from-layer="278" from-port="2" to-layer="279" to-port="0" />
<edge from-layer="279" from-port="1" to-layer="282" to-port="0" />
<edge from-layer="280" from-port="0" to-layer="281" to-port="0" />
<edge from-layer="281" from-port="1" to-layer="282" to-port="1" />
<edge from-layer="282" from-port="2" to-layer="285" to-port="0" />
<edge from-layer="283" from-port="0" to-layer="284" to-port="0" />
<edge from-layer="284" from-port="1" to-layer="285" to-port="1" />
<edge from-layer="285" from-port="2" to-layer="286" to-port="0" />
<edge from-layer="286" from-port="1" to-layer="287" to-port="0" />
<edge from-layer="287" from-port="2" to-layer="290" to-port="0" />
<edge from-layer="288" from-port="0" to-layer="289" to-port="0" />
<edge from-layer="289" from-port="1" to-layer="290" to-port="1" />
<edge from-layer="290" from-port="2" to-layer="293" to-port="0" />
<edge from-layer="291" from-port="0" to-layer="292" to-port="0" />
<edge from-layer="292" from-port="1" to-layer="293" to-port="1" />
<edge from-layer="293" from-port="2" to-layer="294" to-port="0" />
<edge from-layer="294" from-port="2" to-layer="297" to-port="0" />
<edge from-layer="295" from-port="0" to-layer="296" to-port="0" />
<edge from-layer="296" from-port="1" to-layer="297" to-port="1" />
<edge from-layer="297" from-port="2" to-layer="300" to-port="0" />
<edge from-layer="298" from-port="0" to-layer="299" to-port="0" />
<edge from-layer="299" from-port="1" to-layer="300" to-port="1" />
<edge from-layer="300" from-port="2" to-layer="301" to-port="0" />
<edge from-layer="301" from-port="1" to-layer="304" to-port="0" />
<edge from-layer="302" from-port="0" to-layer="303" to-port="0" />
<edge from-layer="303" from-port="1" to-layer="304" to-port="1" />
<edge from-layer="304" from-port="2" to-layer="307" to-port="0" />
<edge from-layer="305" from-port="0" to-layer="306" to-port="0" />
<edge from-layer="306" from-port="1" to-layer="307" to-port="1" />
<edge from-layer="307" from-port="2" to-layer="308" to-port="0" />
<edge from-layer="308" from-port="1" to-layer="310" to-port="0" />
<edge from-layer="308" from-port="1" to-layer="325" to-port="1" />
<edge from-layer="309" from-port="0" to-layer="310" to-port="1" />
<edge from-layer="310" from-port="2" to-layer="313" to-port="0" />
<edge from-layer="311" from-port="0" to-layer="312" to-port="0" />
<edge from-layer="312" from-port="1" to-layer="313" to-port="1" />
<edge from-layer="313" from-port="2" to-layer="316" to-port="0" />
<edge from-layer="314" from-port="0" to-layer="315" to-port="0" />
<edge from-layer="315" from-port="1" to-layer="316" to-port="1" />
<edge from-layer="316" from-port="2" to-layer="317" to-port="0" />
<edge from-layer="317" from-port="1" to-layer="320" to-port="0" />
<edge from-layer="318" from-port="0" to-layer="319" to-port="0" />
<edge from-layer="319" from-port="1" to-layer="320" to-port="1" />
<edge from-layer="320" from-port="2" to-layer="323" to-port="0" />
<edge from-layer="321" from-port="0" to-layer="322" to-port="0" />
<edge from-layer="322" from-port="1" to-layer="323" to-port="1" />
<edge from-layer="323" from-port="2" to-layer="324" to-port="0" />
<edge from-layer="324" from-port="1" to-layer="325" to-port="0" />
<edge from-layer="325" from-port="2" to-layer="328" to-port="0" />
<edge from-layer="326" from-port="0" to-layer="327" to-port="0" />
<edge from-layer="327" from-port="1" to-layer="328" to-port="1" />
<edge from-layer="328" from-port="2" to-layer="331" to-port="0" />
<edge from-layer="329" from-port="0" to-layer="330" to-port="0" />
<edge from-layer="330" from-port="1" to-layer="331" to-port="1" />
<edge from-layer="331" from-port="2" to-layer="334" to-port="0" />
<edge from-layer="331" from-port="2" to-layer="369" to-port="1" />
<edge from-layer="332" from-port="0" to-layer="333" to-port="0" />
<edge from-layer="333" from-port="1" to-layer="334" to-port="1" />
<edge from-layer="334" from-port="2" to-layer="337" to-port="0" />
<edge from-layer="335" from-port="0" to-layer="336" to-port="0" />
<edge from-layer="336" from-port="1" to-layer="337" to-port="1" />
<edge from-layer="337" from-port="2" to-layer="338" to-port="0" />
<edge from-layer="338" from-port="1" to-layer="341" to-port="0" />
<edge from-layer="339" from-port="0" to-layer="340" to-port="0" />
<edge from-layer="340" from-port="1" to-layer="341" to-port="1" />
<edge from-layer="341" from-port="2" to-layer="344" to-port="0" />
<edge from-layer="342" from-port="0" to-layer="343" to-port="0" />
<edge from-layer="343" from-port="1" to-layer="344" to-port="1" />
<edge from-layer="344" from-port="2" to-layer="345" to-port="0" />
<edge from-layer="345" from-port="1" to-layer="347" to-port="0" />
<edge from-layer="345" from-port="1" to-layer="362" to-port="1" />
<edge from-layer="346" from-port="0" to-layer="347" to-port="1" />
<edge from-layer="347" from-port="2" to-layer="350" to-port="0" />
<edge from-layer="348" from-port="0" to-layer="349" to-port="0" />
<edge from-layer="349" from-port="1" to-layer="350" to-port="1" />
<edge from-layer="350" from-port="2" to-layer="353" to-port="0" />
<edge from-layer="351" from-port="0" to-layer="352" to-port="0" />
<edge from-layer="352" from-port="1" to-layer="353" to-port="1" />
<edge from-layer="353" from-port="2" to-layer="354" to-port="0" />
<edge from-layer="354" from-port="1" to-layer="357" to-port="0" />
<edge from-layer="355" from-port="0" to-layer="356" to-port="0" />
<edge from-layer="356" from-port="1" to-layer="357" to-port="1" />
<edge from-layer="357" from-port="2" to-layer="360" to-port="0" />
<edge from-layer="358" from-port="0" to-layer="359" to-port="0" />
<edge from-layer="359" from-port="1" to-layer="360" to-port="1" />
<edge from-layer="360" from-port="2" to-layer="361" to-port="0" />
<edge from-layer="361" from-port="1" to-layer="362" to-port="0" />
<edge from-layer="362" from-port="2" to-layer="365" to-port="0" />
<edge from-layer="363" from-port="0" to-layer="364" to-port="0" />
<edge from-layer="364" from-port="1" to-layer="365" to-port="1" />
<edge from-layer="365" from-port="2" to-layer="368" to-port="0" />
<edge from-layer="366" from-port="0" to-layer="367" to-port="0" />
<edge from-layer="367" from-port="1" to-layer="368" to-port="1" />
<edge from-layer="368" from-port="2" to-layer="369" to-port="0" />
<edge from-layer="369" from-port="2" to-layer="372" to-port="0" />
<edge from-layer="369" from-port="2" to-layer="407" to-port="1" />
<edge from-layer="370" from-port="0" to-layer="371" to-port="0" />
<edge from-layer="371" from-port="1" to-layer="372" to-port="1" />
<edge from-layer="372" from-port="2" to-layer="375" to-port="0" />
<edge from-layer="373" from-port="0" to-layer="374" to-port="0" />
<edge from-layer="374" from-port="1" to-layer="375" to-port="1" />
<edge from-layer="375" from-port="2" to-layer="376" to-port="0" />
<edge from-layer="376" from-port="1" to-layer="379" to-port="0" />
<edge from-layer="377" from-port="0" to-layer="378" to-port="0" />
<edge from-layer="378" from-port="1" to-layer="379" to-port="1" />
<edge from-layer="379" from-port="2" to-layer="382" to-port="0" />
<edge from-layer="380" from-port="0" to-layer="381" to-port="0" />
<edge from-layer="381" from-port="1" to-layer="382" to-port="1" />
<edge from-layer="382" from-port="2" to-layer="383" to-port="0" />
<edge from-layer="383" from-port="1" to-layer="385" to-port="0" />
<edge from-layer="383" from-port="1" to-layer="400" to-port="1" />
<edge from-layer="384" from-port="0" to-layer="385" to-port="1" />
<edge from-layer="385" from-port="2" to-layer="388" to-port="0" />
<edge from-layer="386" from-port="0" to-layer="387" to-port="0" />
<edge from-layer="387" from-port="1" to-layer="388" to-port="1" />
<edge from-layer="388" from-port="2" to-layer="391" to-port="0" />
<edge from-layer="389" from-port="0" to-layer="390" to-port="0" />
<edge from-layer="390" from-port="1" to-layer="391" to-port="1" />
<edge from-layer="391" from-port="2" to-layer="392" to-port="0" />
<edge from-layer="392" from-port="1" to-layer="395" to-port="0" />
<edge from-layer="393" from-port="0" to-layer="394" to-port="0" />
<edge from-layer="394" from-port="1" to-layer="395" to-port="1" />
<edge from-layer="395" from-port="2" to-layer="398" to-port="0" />
<edge from-layer="396" from-port="0" to-layer="397" to-port="0" />
<edge from-layer="397" from-port="1" to-layer="398" to-port="1" />
<edge from-layer="398" from-port="2" to-layer="399" to-port="0" />
<edge from-layer="399" from-port="1" to-layer="400" to-port="0" />
<edge from-layer="400" from-port="2" to-layer="403" to-port="0" />
<edge from-layer="401" from-port="0" to-layer="402" to-port="0" />
<edge from-layer="402" from-port="1" to-layer="403" to-port="1" />
<edge from-layer="403" from-port="2" to-layer="406" to-port="0" />
<edge from-layer="404" from-port="0" to-layer="405" to-port="0" />
<edge from-layer="405" from-port="1" to-layer="406" to-port="1" />
<edge from-layer="406" from-port="2" to-layer="407" to-port="0" />
<edge from-layer="407" from-port="2" to-layer="410" to-port="0" />
<edge from-layer="407" from-port="2" to-layer="445" to-port="1" />
<edge from-layer="408" from-port="0" to-layer="409" to-port="0" />
<edge from-layer="409" from-port="1" to-layer="410" to-port="1" />
<edge from-layer="410" from-port="2" to-layer="413" to-port="0" />
<edge from-layer="411" from-port="0" to-layer="412" to-port="0" />
<edge from-layer="412" from-port="1" to-layer="413" to-port="1" />
<edge from-layer="413" from-port="2" to-layer="414" to-port="0" />
<edge from-layer="414" from-port="1" to-layer="417" to-port="0" />
<edge from-layer="415" from-port="0" to-layer="416" to-port="0" />
<edge from-layer="416" from-port="1" to-layer="417" to-port="1" />
<edge from-layer="417" from-port="2" to-layer="420" to-port="0" />
<edge from-layer="418" from-port="0" to-layer="419" to-port="0" />
<edge from-layer="419" from-port="1" to-layer="420" to-port="1" />
<edge from-layer="420" from-port="2" to-layer="421" to-port="0" />
<edge from-layer="421" from-port="1" to-layer="423" to-port="0" />
<edge from-layer="421" from-port="1" to-layer="438" to-port="1" />
<edge from-layer="422" from-port="0" to-layer="423" to-port="1" />
<edge from-layer="423" from-port="2" to-layer="426" to-port="0" />
<edge from-layer="424" from-port="0" to-layer="425" to-port="0" />
<edge from-layer="425" from-port="1" to-layer="426" to-port="1" />
<edge from-layer="426" from-port="2" to-layer="429" to-port="0" />
<edge from-layer="427" from-port="0" to-layer="428" to-port="0" />
<edge from-layer="428" from-port="1" to-layer="429" to-port="1" />
<edge from-layer="429" from-port="2" to-layer="430" to-port="0" />
<edge from-layer="430" from-port="1" to-layer="433" to-port="0" />
<edge from-layer="431" from-port="0" to-layer="432" to-port="0" />
<edge from-layer="432" from-port="1" to-layer="433" to-port="1" />
<edge from-layer="433" from-port="2" to-layer="436" to-port="0" />
<edge from-layer="434" from-port="0" to-layer="435" to-port="0" />
<edge from-layer="435" from-port="1" to-layer="436" to-port="1" />
<edge from-layer="436" from-port="2" to-layer="437" to-port="0" />
<edge from-layer="437" from-port="1" to-layer="438" to-port="0" />
<edge from-layer="438" from-port="2" to-layer="441" to-port="0" />
<edge from-layer="439" from-port="0" to-layer="440" to-port="0" />
<edge from-layer="440" from-port="1" to-layer="441" to-port="1" />
<edge from-layer="441" from-port="2" to-layer="444" to-port="0" />
<edge from-layer="442" from-port="0" to-layer="443" to-port="0" />
<edge from-layer="443" from-port="1" to-layer="444" to-port="1" />
<edge from-layer="444" from-port="2" to-layer="445" to-port="0" />
<edge from-layer="445" from-port="2" to-layer="448" to-port="0" />
<edge from-layer="446" from-port="0" to-layer="447" to-port="0" />
<edge from-layer="447" from-port="1" to-layer="448" to-port="1" />
<edge from-layer="448" from-port="2" to-layer="451" to-port="0" />
<edge from-layer="449" from-port="0" to-layer="450" to-port="0" />
<edge from-layer="450" from-port="1" to-layer="451" to-port="1" />
<edge from-layer="451" from-port="2" to-layer="452" to-port="0" />
<edge from-layer="452" from-port="1" to-layer="455" to-port="0" />
<edge from-layer="453" from-port="0" to-layer="454" to-port="0" />
<edge from-layer="454" from-port="1" to-layer="455" to-port="1" />
<edge from-layer="455" from-port="2" to-layer="458" to-port="0" />
<edge from-layer="456" from-port="0" to-layer="457" to-port="0" />
<edge from-layer="457" from-port="1" to-layer="458" to-port="1" />
<edge from-layer="458" from-port="2" to-layer="459" to-port="0" />
<edge from-layer="459" from-port="1" to-layer="476" to-port="1" />
<edge from-layer="459" from-port="1" to-layer="461" to-port="0" />
<edge from-layer="460" from-port="0" to-layer="461" to-port="1" />
<edge from-layer="461" from-port="2" to-layer="464" to-port="0" />
<edge from-layer="462" from-port="0" to-layer="463" to-port="0" />
<edge from-layer="463" from-port="1" to-layer="464" to-port="1" />
<edge from-layer="464" from-port="2" to-layer="467" to-port="0" />
<edge from-layer="465" from-port="0" to-layer="466" to-port="0" />
<edge from-layer="466" from-port="1" to-layer="467" to-port="1" />
<edge from-layer="467" from-port="2" to-layer="468" to-port="0" />
<edge from-layer="468" from-port="1" to-layer="471" to-port="0" />
<edge from-layer="469" from-port="0" to-layer="470" to-port="0" />
<edge from-layer="470" from-port="1" to-layer="471" to-port="1" />
<edge from-layer="471" from-port="2" to-layer="474" to-port="0" />
<edge from-layer="472" from-port="0" to-layer="473" to-port="0" />
<edge from-layer="473" from-port="1" to-layer="474" to-port="1" />
<edge from-layer="474" from-port="2" to-layer="475" to-port="0" />
<edge from-layer="475" from-port="1" to-layer="476" to-port="0" />
<edge from-layer="476" from-port="2" to-layer="479" to-port="0" />
<edge from-layer="477" from-port="0" to-layer="478" to-port="0" />
<edge from-layer="478" from-port="1" to-layer="479" to-port="1" />
<edge from-layer="479" from-port="2" to-layer="482" to-port="0" />
<edge from-layer="480" from-port="0" to-layer="481" to-port="0" />
<edge from-layer="481" from-port="1" to-layer="482" to-port="1" />
<edge from-layer="482" from-port="2" to-layer="485" to-port="0" />
<edge from-layer="482" from-port="2" to-layer="520" to-port="1" />
<edge from-layer="483" from-port="0" to-layer="484" to-port="0" />
<edge from-layer="484" from-port="1" to-layer="485" to-port="1" />
<edge from-layer="485" from-port="2" to-layer="488" to-port="0" />
<edge from-layer="486" from-port="0" to-layer="487" to-port="0" />
<edge from-layer="487" from-port="1" to-layer="488" to-port="1" />
<edge from-layer="488" from-port="2" to-layer="489" to-port="0" />
<edge from-layer="489" from-port="1" to-layer="492" to-port="0" />
<edge from-layer="490" from-port="0" to-layer="491" to-port="0" />
<edge from-layer="491" from-port="1" to-layer="492" to-port="1" />
<edge from-layer="492" from-port="2" to-layer="495" to-port="0" />
<edge from-layer="493" from-port="0" to-layer="494" to-port="0" />
<edge from-layer="494" from-port="1" to-layer="495" to-port="1" />
<edge from-layer="495" from-port="2" to-layer="496" to-port="0" />
<edge from-layer="496" from-port="1" to-layer="498" to-port="0" />
<edge from-layer="496" from-port="1" to-layer="513" to-port="1" />
<edge from-layer="497" from-port="0" to-layer="498" to-port="1" />
<edge from-layer="498" from-port="2" to-layer="501" to-port="0" />
<edge from-layer="499" from-port="0" to-layer="500" to-port="0" />
<edge from-layer="500" from-port="1" to-layer="501" to-port="1" />
<edge from-layer="501" from-port="2" to-layer="504" to-port="0" />
<edge from-layer="502" from-port="0" to-layer="503" to-port="0" />
<edge from-layer="503" from-port="1" to-layer="504" to-port="1" />
<edge from-layer="504" from-port="2" to-layer="505" to-port="0" />
<edge from-layer="505" from-port="1" to-layer="508" to-port="0" />
<edge from-layer="506" from-port="0" to-layer="507" to-port="0" />
<edge from-layer="507" from-port="1" to-layer="508" to-port="1" />
<edge from-layer="508" from-port="2" to-layer="511" to-port="0" />
<edge from-layer="509" from-port="0" to-layer="510" to-port="0" />
<edge from-layer="510" from-port="1" to-layer="511" to-port="1" />
<edge from-layer="511" from-port="2" to-layer="512" to-port="0" />
<edge from-layer="512" from-port="1" to-layer="513" to-port="0" />
<edge from-layer="513" from-port="2" to-layer="516" to-port="0" />
<edge from-layer="514" from-port="0" to-layer="515" to-port="0" />
<edge from-layer="515" from-port="1" to-layer="516" to-port="1" />
<edge from-layer="516" from-port="2" to-layer="519" to-port="0" />
<edge from-layer="517" from-port="0" to-layer="518" to-port="0" />
<edge from-layer="518" from-port="1" to-layer="519" to-port="1" />
<edge from-layer="519" from-port="2" to-layer="520" to-port="0" />
<edge from-layer="520" from-port="2" to-layer="523" to-port="0" />
<edge from-layer="520" from-port="2" to-layer="558" to-port="1" />
<edge from-layer="521" from-port="0" to-layer="522" to-port="0" />
<edge from-layer="522" from-port="1" to-layer="523" to-port="1" />
<edge from-layer="523" from-port="2" to-layer="526" to-port="0" />
<edge from-layer="524" from-port="0" to-layer="525" to-port="0" />
<edge from-layer="525" from-port="1" to-layer="526" to-port="1" />
<edge from-layer="526" from-port="2" to-layer="527" to-port="0" />
<edge from-layer="527" from-port="1" to-layer="530" to-port="0" />
<edge from-layer="528" from-port="0" to-layer="529" to-port="0" />
<edge from-layer="529" from-port="1" to-layer="530" to-port="1" />
<edge from-layer="530" from-port="2" to-layer="533" to-port="0" />
<edge from-layer="531" from-port="0" to-layer="532" to-port="0" />
<edge from-layer="532" from-port="1" to-layer="533" to-port="1" />
<edge from-layer="533" from-port="2" to-layer="534" to-port="0" />
<edge from-layer="534" from-port="1" to-layer="551" to-port="1" />
<edge from-layer="534" from-port="1" to-layer="536" to-port="0" />
<edge from-layer="535" from-port="0" to-layer="536" to-port="1" />
<edge from-layer="536" from-port="2" to-layer="539" to-port="0" />
<edge from-layer="537" from-port="0" to-layer="538" to-port="0" />
<edge from-layer="538" from-port="1" to-layer="539" to-port="1" />
<edge from-layer="539" from-port="2" to-layer="542" to-port="0" />
<edge from-layer="540" from-port="0" to-layer="541" to-port="0" />
<edge from-layer="541" from-port="1" to-layer="542" to-port="1" />
<edge from-layer="542" from-port="2" to-layer="543" to-port="0" />
<edge from-layer="543" from-port="1" to-layer="546" to-port="0" />
<edge from-layer="544" from-port="0" to-layer="545" to-port="0" />
<edge from-layer="545" from-port="1" to-layer="546" to-port="1" />
<edge from-layer="546" from-port="2" to-layer="549" to-port="0" />
<edge from-layer="547" from-port="0" to-layer="548" to-port="0" />
<edge from-layer="548" from-port="1" to-layer="549" to-port="1" />
<edge from-layer="549" from-port="2" to-layer="550" to-port="0" />
<edge from-layer="550" from-port="1" to-layer="551" to-port="0" />
<edge from-layer="551" from-port="2" to-layer="554" to-port="0" />
<edge from-layer="552" from-port="0" to-layer="553" to-port="0" />
<edge from-layer="553" from-port="1" to-layer="554" to-port="1" />
<edge from-layer="554" from-port="2" to-layer="557" to-port="0" />
<edge from-layer="555" from-port="0" to-layer="556" to-port="0" />
<edge from-layer="556" from-port="1" to-layer="557" to-port="1" />
<edge from-layer="557" from-port="2" to-layer="558" to-port="0" />
<edge from-layer="558" from-port="2" to-layer="561" to-port="0" />
<edge from-layer="558" from-port="2" to-layer="596" to-port="1" />
<edge from-layer="559" from-port="0" to-layer="560" to-port="0" />
<edge from-layer="560" from-port="1" to-layer="561" to-port="1" />
<edge from-layer="561" from-port="2" to-layer="564" to-port="0" />
<edge from-layer="562" from-port="0" to-layer="563" to-port="0" />
<edge from-layer="563" from-port="1" to-layer="564" to-port="1" />
<edge from-layer="564" from-port="2" to-layer="565" to-port="0" />
<edge from-layer="565" from-port="1" to-layer="568" to-port="0" />
<edge from-layer="566" from-port="0" to-layer="567" to-port="0" />
<edge from-layer="567" from-port="1" to-layer="568" to-port="1" />
<edge from-layer="568" from-port="2" to-layer="571" to-port="0" />
<edge from-layer="569" from-port="0" to-layer="570" to-port="0" />
<edge from-layer="570" from-port="1" to-layer="571" to-port="1" />
<edge from-layer="571" from-port="2" to-layer="572" to-port="0" />
<edge from-layer="572" from-port="1" to-layer="574" to-port="0" />
<edge from-layer="572" from-port="1" to-layer="589" to-port="1" />
<edge from-layer="573" from-port="0" to-layer="574" to-port="1" />
<edge from-layer="574" from-port="2" to-layer="577" to-port="0" />
<edge from-layer="575" from-port="0" to-layer="576" to-port="0" />
<edge from-layer="576" from-port="1" to-layer="577" to-port="1" />
<edge from-layer="577" from-port="2" to-layer="580" to-port="0" />
<edge from-layer="578" from-port="0" to-layer="579" to-port="0" />
<edge from-layer="579" from-port="1" to-layer="580" to-port="1" />
<edge from-layer="580" from-port="2" to-layer="581" to-port="0" />
<edge from-layer="581" from-port="1" to-layer="584" to-port="0" />
<edge from-layer="582" from-port="0" to-layer="583" to-port="0" />
<edge from-layer="583" from-port="1" to-layer="584" to-port="1" />
<edge from-layer="584" from-port="2" to-layer="587" to-port="0" />
<edge from-layer="585" from-port="0" to-layer="586" to-port="0" />
<edge from-layer="586" from-port="1" to-layer="587" to-port="1" />
<edge from-layer="587" from-port="2" to-layer="588" to-port="0" />
<edge from-layer="588" from-port="1" to-layer="589" to-port="0" />
<edge from-layer="589" from-port="2" to-layer="592" to-port="0" />
<edge from-layer="590" from-port="0" to-layer="591" to-port="0" />
<edge from-layer="591" from-port="1" to-layer="592" to-port="1" />
<edge from-layer="592" from-port="2" to-layer="595" to-port="0" />
<edge from-layer="593" from-port="0" to-layer="594" to-port="0" />
<edge from-layer="594" from-port="1" to-layer="595" to-port="1" />
<edge from-layer="595" from-port="2" to-layer="596" to-port="0" />
<edge from-layer="596" from-port="2" to-layer="599" to-port="0" />
<edge from-layer="597" from-port="0" to-layer="598" to-port="0" />
<edge from-layer="598" from-port="1" to-layer="599" to-port="1" />
<edge from-layer="599" from-port="2" to-layer="602" to-port="0" />
<edge from-layer="600" from-port="0" to-layer="601" to-port="0" />
<edge from-layer="601" from-port="1" to-layer="602" to-port="1" />
<edge from-layer="602" from-port="2" to-layer="603" to-port="0" />
<edge from-layer="603" from-port="1" to-layer="606" to-port="0" />
<edge from-layer="604" from-port="0" to-layer="605" to-port="0" />
<edge from-layer="605" from-port="1" to-layer="606" to-port="1" />
<edge from-layer="606" from-port="2" to-layer="609" to-port="0" />
<edge from-layer="607" from-port="0" to-layer="608" to-port="0" />
<edge from-layer="608" from-port="1" to-layer="609" to-port="1" />
<edge from-layer="609" from-port="2" to-layer="610" to-port="0" />
<edge from-layer="610" from-port="1" to-layer="627" to-port="1" />
<edge from-layer="610" from-port="1" to-layer="612" to-port="0" />
<edge from-layer="611" from-port="0" to-layer="612" to-port="1" />
<edge from-layer="612" from-port="2" to-layer="615" to-port="0" />
<edge from-layer="613" from-port="0" to-layer="614" to-port="0" />
<edge from-layer="614" from-port="1" to-layer="615" to-port="1" />
<edge from-layer="615" from-port="2" to-layer="618" to-port="0" />
<edge from-layer="616" from-port="0" to-layer="617" to-port="0" />
<edge from-layer="617" from-port="1" to-layer="618" to-port="1" />
<edge from-layer="618" from-port="2" to-layer="619" to-port="0" />
<edge from-layer="619" from-port="1" to-layer="622" to-port="0" />
<edge from-layer="620" from-port="0" to-layer="621" to-port="0" />
<edge from-layer="621" from-port="1" to-layer="622" to-port="1" />
<edge from-layer="622" from-port="2" to-layer="625" to-port="0" />
<edge from-layer="623" from-port="0" to-layer="624" to-port="0" />
<edge from-layer="624" from-port="1" to-layer="625" to-port="1" />
<edge from-layer="625" from-port="2" to-layer="626" to-port="0" />
<edge from-layer="626" from-port="1" to-layer="627" to-port="0" />
<edge from-layer="627" from-port="2" to-layer="630" to-port="0" />
<edge from-layer="628" from-port="0" to-layer="629" to-port="0" />
<edge from-layer="629" from-port="1" to-layer="630" to-port="1" />
<edge from-layer="630" from-port="2" to-layer="633" to-port="0" />
<edge from-layer="631" from-port="0" to-layer="632" to-port="0" />
<edge from-layer="632" from-port="1" to-layer="633" to-port="1" />
<edge from-layer="633" from-port="2" to-layer="636" to-port="0" />
<edge from-layer="633" from-port="2" to-layer="671" to-port="1" />
<edge from-layer="634" from-port="0" to-layer="635" to-port="0" />
<edge from-layer="635" from-port="1" to-layer="636" to-port="1" />
<edge from-layer="636" from-port="2" to-layer="639" to-port="0" />
<edge from-layer="637" from-port="0" to-layer="638" to-port="0" />
<edge from-layer="638" from-port="1" to-layer="639" to-port="1" />
<edge from-layer="639" from-port="2" to-layer="640" to-port="0" />
<edge from-layer="640" from-port="1" to-layer="643" to-port="0" />
<edge from-layer="641" from-port="0" to-layer="642" to-port="0" />
<edge from-layer="642" from-port="1" to-layer="643" to-port="1" />
<edge from-layer="643" from-port="2" to-layer="646" to-port="0" />
<edge from-layer="644" from-port="0" to-layer="645" to-port="0" />
<edge from-layer="645" from-port="1" to-layer="646" to-port="1" />
<edge from-layer="646" from-port="2" to-layer="647" to-port="0" />
<edge from-layer="647" from-port="1" to-layer="664" to-port="1" />
<edge from-layer="647" from-port="1" to-layer="649" to-port="0" />
<edge from-layer="648" from-port="0" to-layer="649" to-port="1" />
<edge from-layer="649" from-port="2" to-layer="652" to-port="0" />
<edge from-layer="650" from-port="0" to-layer="651" to-port="0" />
<edge from-layer="651" from-port="1" to-layer="652" to-port="1" />
<edge from-layer="652" from-port="2" to-layer="655" to-port="0" />
<edge from-layer="653" from-port="0" to-layer="654" to-port="0" />
<edge from-layer="654" from-port="1" to-layer="655" to-port="1" />
<edge from-layer="655" from-port="2" to-layer="656" to-port="0" />
<edge from-layer="656" from-port="1" to-layer="659" to-port="0" />
<edge from-layer="657" from-port="0" to-layer="658" to-port="0" />
<edge from-layer="658" from-port="1" to-layer="659" to-port="1" />
<edge from-layer="659" from-port="2" to-layer="662" to-port="0" />
<edge from-layer="660" from-port="0" to-layer="661" to-port="0" />
<edge from-layer="661" from-port="1" to-layer="662" to-port="1" />
<edge from-layer="662" from-port="2" to-layer="663" to-port="0" />
<edge from-layer="663" from-port="1" to-layer="664" to-port="0" />
<edge from-layer="664" from-port="2" to-layer="667" to-port="0" />
<edge from-layer="665" from-port="0" to-layer="666" to-port="0" />
<edge from-layer="666" from-port="1" to-layer="667" to-port="1" />
<edge from-layer="667" from-port="2" to-layer="670" to-port="0" />
<edge from-layer="668" from-port="0" to-layer="669" to-port="0" />
<edge from-layer="669" from-port="1" to-layer="670" to-port="1" />
<edge from-layer="670" from-port="2" to-layer="671" to-port="0" />
<edge from-layer="671" from-port="2" to-layer="709" to-port="1" />
<edge from-layer="671" from-port="2" to-layer="674" to-port="0" />
<edge from-layer="672" from-port="0" to-layer="673" to-port="0" />
<edge from-layer="673" from-port="1" to-layer="674" to-port="1" />
<edge from-layer="674" from-port="2" to-layer="677" to-port="0" />
<edge from-layer="675" from-port="0" to-layer="676" to-port="0" />
<edge from-layer="676" from-port="1" to-layer="677" to-port="1" />
<edge from-layer="677" from-port="2" to-layer="678" to-port="0" />
<edge from-layer="678" from-port="1" to-layer="681" to-port="0" />
<edge from-layer="679" from-port="0" to-layer="680" to-port="0" />
<edge from-layer="680" from-port="1" to-layer="681" to-port="1" />
<edge from-layer="681" from-port="2" to-layer="684" to-port="0" />
<edge from-layer="682" from-port="0" to-layer="683" to-port="0" />
<edge from-layer="683" from-port="1" to-layer="684" to-port="1" />
<edge from-layer="684" from-port="2" to-layer="685" to-port="0" />
<edge from-layer="685" from-port="1" to-layer="687" to-port="0" />
<edge from-layer="685" from-port="1" to-layer="702" to-port="1" />
<edge from-layer="686" from-port="0" to-layer="687" to-port="1" />
<edge from-layer="687" from-port="2" to-layer="690" to-port="0" />
<edge from-layer="688" from-port="0" to-layer="689" to-port="0" />
<edge from-layer="689" from-port="1" to-layer="690" to-port="1" />
<edge from-layer="690" from-port="2" to-layer="693" to-port="0" />
<edge from-layer="691" from-port="0" to-layer="692" to-port="0" />
<edge from-layer="692" from-port="1" to-layer="693" to-port="1" />
<edge from-layer="693" from-port="2" to-layer="694" to-port="0" />
<edge from-layer="694" from-port="1" to-layer="697" to-port="0" />
<edge from-layer="695" from-port="0" to-layer="696" to-port="0" />
<edge from-layer="696" from-port="1" to-layer="697" to-port="1" />
<edge from-layer="697" from-port="2" to-layer="700" to-port="0" />
<edge from-layer="698" from-port="0" to-layer="699" to-port="0" />
<edge from-layer="699" from-port="1" to-layer="700" to-port="1" />
<edge from-layer="700" from-port="2" to-layer="701" to-port="0" />
<edge from-layer="701" from-port="1" to-layer="702" to-port="0" />
<edge from-layer="702" from-port="2" to-layer="705" to-port="0" />
<edge from-layer="703" from-port="0" to-layer="704" to-port="0" />
<edge from-layer="704" from-port="1" to-layer="705" to-port="1" />
<edge from-layer="705" from-port="2" to-layer="708" to-port="0" />
<edge from-layer="706" from-port="0" to-layer="707" to-port="0" />
<edge from-layer="707" from-port="1" to-layer="708" to-port="1" />
<edge from-layer="708" from-port="2" to-layer="709" to-port="0" />
<edge from-layer="709" from-port="2" to-layer="712" to-port="0" />
<edge from-layer="709" from-port="2" to-layer="747" to-port="1" />
<edge from-layer="710" from-port="0" to-layer="711" to-port="0" />
<edge from-layer="711" from-port="1" to-layer="712" to-port="1" />
<edge from-layer="712" from-port="2" to-layer="715" to-port="0" />
<edge from-layer="713" from-port="0" to-layer="714" to-port="0" />
<edge from-layer="714" from-port="1" to-layer="715" to-port="1" />
<edge from-layer="715" from-port="2" to-layer="716" to-port="0" />
<edge from-layer="716" from-port="1" to-layer="719" to-port="0" />
<edge from-layer="717" from-port="0" to-layer="718" to-port="0" />
<edge from-layer="718" from-port="1" to-layer="719" to-port="1" />
<edge from-layer="719" from-port="2" to-layer="722" to-port="0" />
<edge from-layer="720" from-port="0" to-layer="721" to-port="0" />
<edge from-layer="721" from-port="1" to-layer="722" to-port="1" />
<edge from-layer="722" from-port="2" to-layer="723" to-port="0" />
<edge from-layer="723" from-port="1" to-layer="725" to-port="0" />
<edge from-layer="723" from-port="1" to-layer="740" to-port="1" />
<edge from-layer="724" from-port="0" to-layer="725" to-port="1" />
<edge from-layer="725" from-port="2" to-layer="728" to-port="0" />
<edge from-layer="726" from-port="0" to-layer="727" to-port="0" />
<edge from-layer="727" from-port="1" to-layer="728" to-port="1" />
<edge from-layer="728" from-port="2" to-layer="731" to-port="0" />
<edge from-layer="729" from-port="0" to-layer="730" to-port="0" />
<edge from-layer="730" from-port="1" to-layer="731" to-port="1" />
<edge from-layer="731" from-port="2" to-layer="732" to-port="0" />
<edge from-layer="732" from-port="1" to-layer="735" to-port="0" />
<edge from-layer="733" from-port="0" to-layer="734" to-port="0" />
<edge from-layer="734" from-port="1" to-layer="735" to-port="1" />
<edge from-layer="735" from-port="2" to-layer="738" to-port="0" />
<edge from-layer="736" from-port="0" to-layer="737" to-port="0" />
<edge from-layer="737" from-port="1" to-layer="738" to-port="1" />
<edge from-layer="738" from-port="2" to-layer="739" to-port="0" />
<edge from-layer="739" from-port="1" to-layer="740" to-port="0" />
<edge from-layer="740" from-port="2" to-layer="743" to-port="0" />
<edge from-layer="741" from-port="0" to-layer="742" to-port="0" />
<edge from-layer="742" from-port="1" to-layer="743" to-port="1" />
<edge from-layer="743" from-port="2" to-layer="746" to-port="0" />
<edge from-layer="744" from-port="0" to-layer="745" to-port="0" />
<edge from-layer="745" from-port="1" to-layer="746" to-port="1" />
<edge from-layer="746" from-port="2" to-layer="747" to-port="0" />
<edge from-layer="747" from-port="2" to-layer="785" to-port="1" />
<edge from-layer="747" from-port="2" to-layer="750" to-port="0" />
<edge from-layer="748" from-port="0" to-layer="749" to-port="0" />
<edge from-layer="749" from-port="1" to-layer="750" to-port="1" />
<edge from-layer="750" from-port="2" to-layer="753" to-port="0" />
<edge from-layer="751" from-port="0" to-layer="752" to-port="0" />
<edge from-layer="752" from-port="1" to-layer="753" to-port="1" />
<edge from-layer="753" from-port="2" to-layer="754" to-port="0" />
<edge from-layer="754" from-port="1" to-layer="757" to-port="0" />
<edge from-layer="755" from-port="0" to-layer="756" to-port="0" />
<edge from-layer="756" from-port="1" to-layer="757" to-port="1" />
<edge from-layer="757" from-port="2" to-layer="760" to-port="0" />
<edge from-layer="758" from-port="0" to-layer="759" to-port="0" />
<edge from-layer="759" from-port="1" to-layer="760" to-port="1" />
<edge from-layer="760" from-port="2" to-layer="761" to-port="0" />
<edge from-layer="761" from-port="1" to-layer="763" to-port="0" />
<edge from-layer="761" from-port="1" to-layer="778" to-port="1" />
<edge from-layer="762" from-port="0" to-layer="763" to-port="1" />
<edge from-layer="763" from-port="2" to-layer="766" to-port="0" />
<edge from-layer="764" from-port="0" to-layer="765" to-port="0" />
<edge from-layer="765" from-port="1" to-layer="766" to-port="1" />
<edge from-layer="766" from-port="2" to-layer="769" to-port="0" />
<edge from-layer="767" from-port="0" to-layer="768" to-port="0" />
<edge from-layer="768" from-port="1" to-layer="769" to-port="1" />
<edge from-layer="769" from-port="2" to-layer="770" to-port="0" />
<edge from-layer="770" from-port="1" to-layer="773" to-port="0" />
<edge from-layer="771" from-port="0" to-layer="772" to-port="0" />
<edge from-layer="772" from-port="1" to-layer="773" to-port="1" />
<edge from-layer="773" from-port="2" to-layer="776" to-port="0" />
<edge from-layer="774" from-port="0" to-layer="775" to-port="0" />
<edge from-layer="775" from-port="1" to-layer="776" to-port="1" />
<edge from-layer="776" from-port="2" to-layer="777" to-port="0" />
<edge from-layer="777" from-port="1" to-layer="778" to-port="0" />
<edge from-layer="778" from-port="2" to-layer="781" to-port="0" />
<edge from-layer="779" from-port="0" to-layer="780" to-port="0" />
<edge from-layer="780" from-port="1" to-layer="781" to-port="1" />
<edge from-layer="781" from-port="2" to-layer="784" to-port="0" />
<edge from-layer="782" from-port="0" to-layer="783" to-port="0" />
<edge from-layer="783" from-port="1" to-layer="784" to-port="1" />
<edge from-layer="784" from-port="2" to-layer="785" to-port="0" />
<edge from-layer="785" from-port="2" to-layer="788" to-port="0" />
<edge from-layer="786" from-port="0" to-layer="787" to-port="0" />
<edge from-layer="787" from-port="1" to-layer="788" to-port="1" />
<edge from-layer="788" from-port="2" to-layer="791" to-port="0" />
<edge from-layer="789" from-port="0" to-layer="790" to-port="0" />
<edge from-layer="790" from-port="1" to-layer="791" to-port="1" />
<edge from-layer="791" from-port="2" to-layer="792" to-port="0" />
<edge from-layer="792" from-port="1" to-layer="795" to-port="0" />
<edge from-layer="793" from-port="0" to-layer="794" to-port="0" />
<edge from-layer="794" from-port="1" to-layer="795" to-port="1" />
<edge from-layer="795" from-port="2" to-layer="798" to-port="0" />
<edge from-layer="796" from-port="0" to-layer="797" to-port="0" />
<edge from-layer="797" from-port="1" to-layer="798" to-port="1" />
<edge from-layer="798" from-port="2" to-layer="799" to-port="0" />
<edge from-layer="799" from-port="1" to-layer="801" to-port="0" />
<edge from-layer="799" from-port="1" to-layer="816" to-port="1" />
<edge from-layer="800" from-port="0" to-layer="801" to-port="1" />
<edge from-layer="801" from-port="2" to-layer="804" to-port="0" />
<edge from-layer="802" from-port="0" to-layer="803" to-port="0" />
<edge from-layer="803" from-port="1" to-layer="804" to-port="1" />
<edge from-layer="804" from-port="2" to-layer="807" to-port="0" />
<edge from-layer="805" from-port="0" to-layer="806" to-port="0" />
<edge from-layer="806" from-port="1" to-layer="807" to-port="1" />
<edge from-layer="807" from-port="2" to-layer="808" to-port="0" />
<edge from-layer="808" from-port="1" to-layer="811" to-port="0" />
<edge from-layer="809" from-port="0" to-layer="810" to-port="0" />
<edge from-layer="810" from-port="1" to-layer="811" to-port="1" />
<edge from-layer="811" from-port="2" to-layer="814" to-port="0" />
<edge from-layer="812" from-port="0" to-layer="813" to-port="0" />
<edge from-layer="813" from-port="1" to-layer="814" to-port="1" />
<edge from-layer="814" from-port="2" to-layer="815" to-port="0" />
<edge from-layer="815" from-port="1" to-layer="816" to-port="0" />
<edge from-layer="816" from-port="2" to-layer="819" to-port="0" />
<edge from-layer="817" from-port="0" to-layer="818" to-port="0" />
<edge from-layer="818" from-port="1" to-layer="819" to-port="1" />
<edge from-layer="819" from-port="2" to-layer="822" to-port="0" />
<edge from-layer="820" from-port="0" to-layer="821" to-port="0" />
<edge from-layer="821" from-port="1" to-layer="822" to-port="1" />
<edge from-layer="822" from-port="2" to-layer="825" to-port="0" />
<edge from-layer="822" from-port="2" to-layer="860" to-port="1" />
<edge from-layer="823" from-port="0" to-layer="824" to-port="0" />
<edge from-layer="824" from-port="1" to-layer="825" to-port="1" />
<edge from-layer="825" from-port="2" to-layer="828" to-port="0" />
<edge from-layer="826" from-port="0" to-layer="827" to-port="0" />
<edge from-layer="827" from-port="1" to-layer="828" to-port="1" />
<edge from-layer="828" from-port="2" to-layer="829" to-port="0" />
<edge from-layer="829" from-port="1" to-layer="832" to-port="0" />
<edge from-layer="830" from-port="0" to-layer="831" to-port="0" />
<edge from-layer="831" from-port="1" to-layer="832" to-port="1" />
<edge from-layer="832" from-port="2" to-layer="835" to-port="0" />
<edge from-layer="833" from-port="0" to-layer="834" to-port="0" />
<edge from-layer="834" from-port="1" to-layer="835" to-port="1" />
<edge from-layer="835" from-port="2" to-layer="836" to-port="0" />
<edge from-layer="836" from-port="1" to-layer="853" to-port="1" />
<edge from-layer="836" from-port="1" to-layer="838" to-port="0" />
<edge from-layer="837" from-port="0" to-layer="838" to-port="1" />
<edge from-layer="838" from-port="2" to-layer="841" to-port="0" />
<edge from-layer="839" from-port="0" to-layer="840" to-port="0" />
<edge from-layer="840" from-port="1" to-layer="841" to-port="1" />
<edge from-layer="841" from-port="2" to-layer="844" to-port="0" />
<edge from-layer="842" from-port="0" to-layer="843" to-port="0" />
<edge from-layer="843" from-port="1" to-layer="844" to-port="1" />
<edge from-layer="844" from-port="2" to-layer="845" to-port="0" />
<edge from-layer="845" from-port="1" to-layer="848" to-port="0" />
<edge from-layer="846" from-port="0" to-layer="847" to-port="0" />
<edge from-layer="847" from-port="1" to-layer="848" to-port="1" />
<edge from-layer="848" from-port="2" to-layer="851" to-port="0" />
<edge from-layer="849" from-port="0" to-layer="850" to-port="0" />
<edge from-layer="850" from-port="1" to-layer="851" to-port="1" />
<edge from-layer="851" from-port="2" to-layer="852" to-port="0" />
<edge from-layer="852" from-port="1" to-layer="853" to-port="0" />
<edge from-layer="853" from-port="2" to-layer="856" to-port="0" />
<edge from-layer="854" from-port="0" to-layer="855" to-port="0" />
<edge from-layer="855" from-port="1" to-layer="856" to-port="1" />
<edge from-layer="856" from-port="2" to-layer="859" to-port="0" />
<edge from-layer="857" from-port="0" to-layer="858" to-port="0" />
<edge from-layer="858" from-port="1" to-layer="859" to-port="1" />
<edge from-layer="859" from-port="2" to-layer="860" to-port="0" />
<edge from-layer="860" from-port="2" to-layer="863" to-port="0" />
<edge from-layer="861" from-port="0" to-layer="862" to-port="0" />
<edge from-layer="862" from-port="1" to-layer="863" to-port="1" />
<edge from-layer="863" from-port="2" to-layer="866" to-port="0" />
<edge from-layer="864" from-port="0" to-layer="865" to-port="0" />
<edge from-layer="865" from-port="1" to-layer="866" to-port="1" />
<edge from-layer="866" from-port="2" to-layer="867" to-port="0" />
<edge from-layer="867" from-port="1" to-layer="869" to-port="0" />
<edge from-layer="868" from-port="0" to-layer="869" to-port="1" />
<edge from-layer="869" from-port="2" to-layer="876" to-port="0" />
<edge from-layer="870" from-port="0" to-layer="875" to-port="0" />
<edge from-layer="871" from-port="0" to-layer="874" to-port="0" />
<edge from-layer="872" from-port="0" to-layer="874" to-port="1" />
<edge from-layer="873" from-port="0" to-layer="874" to-port="2" />
<edge from-layer="874" from-port="3" to-layer="875" to-port="1" />
<edge from-layer="875" from-port="2" to-layer="876" to-port="1" />
<edge from-layer="876" from-port="2" to-layer="879" to-port="0" />
<edge from-layer="877" from-port="0" to-layer="878" to-port="0" />
<edge from-layer="878" from-port="1" to-layer="879" to-port="1" />
<edge from-layer="879" from-port="2" to-layer="882" to-port="0" />
<edge from-layer="880" from-port="0" to-layer="881" to-port="0" />
<edge from-layer="881" from-port="1" to-layer="882" to-port="1" />
<edge from-layer="882" from-port="2" to-layer="883" to-port="0" />
</edges>
<rt_info>
<Runtime_version value="2026.1.0-21367-63e31528c62-releases/2026/1" />
<conversion_parameters>
<framework value="pytorch" />
<is_python_object value="True" />
</conversion_parameters>
<model_info>
<labels value="tench goldfish great_white_shark tiger_shark hammerhead electric_ray stingray cock hen ostrich brambling goldfinch house_finch junco indigo_bunting robin bulbul jay magpie chickadee water_ouzel kite bald_eagle vulture great_grey_owl European_fire_salamander common_newt eft spotted_salamander axolotl bullfrog tree_frog tailed_frog loggerhead leatherback_turtle mud_turtle terrapin box_turtle banded_gecko common_iguana American_chameleon whiptail agama frilled_lizard alligator_lizard Gila_monster green_lizard African_chameleon Komodo_dragon African_crocodile American_alligator triceratops thunder_snake ringneck_snake hognose_snake green_snake king_snake garter_snake water_snake vine_snake night_snake boa_constrictor rock_python Indian_cobra green_mamba sea_snake horned_viper diamondback sidewinder trilobite harvestman scorpion black_and_gold_garden_spider barn_spider garden_spider black_widow tarantula wolf_spider tick centipede black_grouse ptarmigan ruffed_grouse prairie_chicken peacock quail partridge African_grey macaw sulphur-crested_cockatoo lorikeet coucal bee_eater hornbill hummingbird jacamar toucan drake red-breasted_merganser goose black_swan tusker echidna platypus wallaby koala wombat jellyfish sea_anemone brain_coral flatworm nematode conch snail slug sea_slug chiton chambered_nautilus Dungeness_crab rock_crab fiddler_crab king_crab American_lobster spiny_lobster crayfish hermit_crab isopod white_stork black_stork spoonbill flamingo little_blue_heron American_egret bittern crane_bird limpkin European_gallinule American_coot bustard ruddy_turnstone red-backed_sandpiper redshank dowitcher oystercatcher pelican king_penguin albatross grey_whale killer_whale dugong sea_lion Chihuahua Japanese_spaniel Maltese_dog Pekinese Shih-Tzu Blenheim_spaniel papillon toy_terrier Rhodesian_ridgeback Afghan_hound basset beagle bloodhound bluetick black-and-tan_coonhound Walker_hound English_foxhound redbone borzoi Irish_wolfhound Italian_greyhound whippet Ibizan_hound Norwegian_elkhound otterhound Saluki Scottish_deerhound Weimaraner Staffordshire_bullterrier American_Staffordshire_terrier Bedlington_terrier Border_terrier Kerry_blue_terrier Irish_terrier Norfolk_terrier Norwich_terrier Yorkshire_terrier wire-haired_fox_terrier Lakeland_terrier Sealyham_terrier Airedale cairn Australian_terrier Dandie_Dinmont Boston_bull miniature_schnauzer giant_schnauzer standard_schnauzer Scotch_terrier Tibetan_terrier silky_terrier soft-coated_wheaten_terrier West_Highland_white_terrier Lhasa flat-coated_retriever curly-coated_retriever golden_retriever Labrador_retriever Chesapeake_Bay_retriever German_short-haired_pointer vizsla English_setter Irish_setter Gordon_setter Brittany_spaniel clumber English_springer Welsh_springer_spaniel cocker_spaniel Sussex_spaniel Irish_water_spaniel kuvasz schipperke groenendael malinois briard kelpie komondor Old_English_sheepdog Shetland_sheepdog collie Border_collie Bouvier_des_Flandres Rottweiler German_shepherd Doberman miniature_pinscher Greater_Swiss_Mountain_dog Bernese_mountain_dog Appenzeller EntleBucher boxer bull_mastiff Tibetan_mastiff French_bulldog Great_Dane Saint_Bernard Eskimo_dog malamute Siberian_husky dalmatian affenpinscher basenji pug Leonberg Newfoundland Great_Pyrenees Samoyed Pomeranian chow keeshond Brabancon_griffon Pembroke Cardigan toy_poodle miniature_poodle standard_poodle Mexican_hairless timber_wolf white_wolf red_wolf coyote dingo dhole African_hunting_dog hyena red_fox kit_fox Arctic_fox grey_fox tabby tiger_cat Persian_cat 
Siamese_cat Egyptian_cat cougar lynx leopard snow_leopard jaguar lion tiger cheetah brown_bear American_black_bear ice_bear sloth_bear mongoose meerkat tiger_beetle ladybug ground_beetle long-horned_beetle leaf_beetle dung_beetle rhinoceros_beetle weevil fly bee ant grasshopper cricket walking_stick cockroach mantis cicada leafhopper lacewing dragonfly damselfly admiral ringlet monarch cabbage_butterfly sulphur_butterfly lycaenid starfish sea_urchin sea_cucumber wood_rabbit hare Angora hamster porcupine fox_squirrel marmot beaver guinea_pig sorrel zebra hog wild_boar warthog hippopotamus ox water_buffalo bison ram bighorn ibex hartebeest impala gazelle Arabian_camel llama weasel mink polecat black-footed_ferret otter skunk badger armadillo three-toed_sloth orangutan gorilla chimpanzee gibbon siamang guenon patas baboon macaque langur colobus proboscis_monkey marmoset capuchin howler_monkey titi spider_monkey squirrel_monkey Madagascar_cat indri Indian_elephant African_elephant lesser_panda giant_panda barracouta eel coho rock_beauty anemone_fish sturgeon gar lionfish puffer abacus abaya academic_gown accordion acoustic_guitar aircraft_carrier airliner airship altar ambulance amphibian analog_clock apiary apron ashcan assault_rifle backpack bakery balance_beam balloon ballpoint Band_Aid banjo bannister barbell barber_chair barbershop barn barometer barrel barrow baseball basketball bassinet bassoon bathing_cap bath_towel bathtub beach_wagon beacon beaker bearskin beer_bottle beer_glass bell_cote bib bicycle-built-for-two bikini binder binoculars birdhouse boathouse bobsled bolo_tie bonnet bookcase bookshop bottlecap bow bow_tie brass brassiere breakwater breastplate broom bucket buckle bulletproof_vest bullet_train butcher_shop cab caldron candle cannon canoe can_opener cardigan car_mirror carousel carpenter's_kit carton car_wheel cash_machine cassette cassette_player castle catamaran CD_player cello cellular_telephone chain chainlink_fence chain_mail chain_saw chest chiffonier chime china_cabinet Christmas_stocking church cinema cleaver cliff_dwelling cloak clog cocktail_shaker coffee_mug coffeepot coil combination_lock computer_keyboard confectionery container_ship convertible corkscrew cornet cowboy_boot cowboy_hat cradle crane crash_helmet crate crib Crock_Pot croquet_ball crutch cuirass dam desk desktop_computer dial_telephone diaper digital_clock digital_watch dining_table dishrag dishwasher disk_brake dock dogsled dome doormat drilling_platform drum drumstick dumbbell Dutch_oven electric_fan electric_guitar electric_locomotive entertainment_center envelope espresso_maker face_powder feather_boa file fireboat fire_engine fire_screen flagpole flute folding_chair football_helmet forklift fountain fountain_pen four-poster freight_car French_horn frying_pan fur_coat garbage_truck gasmask gas_pump goblet go-kart golf_ball golfcart gondola gong gown grand_piano greenhouse grille grocery_store guillotine hair_slide hair_spray half_track hammer hamper hand_blower hand-held_computer handkerchief hard_disc harmonica harp harvester hatchet holster home_theater honeycomb hook hoopskirt horizontal_bar horse_cart hourglass iPod iron jack-o'-lantern jean jeep jersey jigsaw_puzzle jinrikisha joystick kimono knee_pad knot lab_coat ladle lampshade laptop lawn_mower lens_cap letter_opener library lifeboat lighter limousine liner lipstick Loafer lotion loudspeaker loupe lumbermill magnetic_compass mailbag mailbox maillot maillot_tank_suit manhole_cover maraca marimba mask matchstick maypole maze 
measuring_cup medicine_chest megalith microphone microwave military_uniform milk_can minibus miniskirt minivan missile mitten mixing_bowl mobile_home Model_T modem monastery monitor moped mortar mortarboard mosque mosquito_net motor_scooter mountain_bike mountain_tent mouse mousetrap moving_van muzzle nail neck_brace necklace nipple notebook obelisk oboe ocarina odometer oil_filter organ oscilloscope overskirt oxcart oxygen_mask packet paddle paddlewheel padlock paintbrush pajama palace panpipe paper_towel parachute parallel_bars park_bench parking_meter passenger_car patio pay-phone pedestal pencil_box pencil_sharpener perfume Petri_dish photocopier pick pickelhaube picket_fence pickup pier piggy_bank pill_bottle pillow ping-pong_ball pinwheel pirate pitcher plane planetarium plastic_bag plate_rack plow plunger Polaroid_camera pole police_van poncho pool_table pop_bottle pot potter's_wheel power_drill prayer_rug printer prison projectile projector puck punching_bag purse quill quilt racer racket radiator radio radio_telescope rain_barrel recreational_vehicle reel reflex_camera refrigerator remote_control restaurant revolver rifle rocking_chair rotisserie rubber_eraser rugby_ball rule running_shoe safe safety_pin saltshaker sandal sarong sax scabbard scale school_bus schooner scoreboard screen screw screwdriver seat_belt sewing_machine shield shoe_shop shoji shopping_basket shopping_cart shovel shower_cap shower_curtain ski ski_mask sleeping_bag slide_rule sliding_door slot snorkel snowmobile snowplow soap_dispenser soccer_ball sock solar_dish sombrero soup_bowl space_bar space_heater space_shuttle spatula speedboat spider_web spindle sports_car spotlight stage steam_locomotive steel_arch_bridge steel_drum stethoscope stole stone_wall stopwatch stove strainer streetcar stretcher studio_couch stupa submarine suit sundial sunglass sunglasses sunscreen suspension_bridge swab sweatshirt swimming_trunks swing switch syringe table_lamp tank tape_player teapot teddy television tennis_ball thatch theater_curtain thimble thresher throne tile_roof toaster tobacco_shop toilet_seat torch totem_pole tow_truck toyshop tractor trailer_truck tray trench_coat tricycle trimaran tripod triumphal_arch trolleybus trombone tub turnstile typewriter_keyboard umbrella unicycle upright vacuum vase vault velvet vending_machine vestment viaduct violin volleyball waffle_iron wall_clock wallet wardrobe warplane washbasin washer water_bottle water_jug water_tower whiskey_jug whistle wig window_screen window_shade Windsor_tie wine_bottle wing wok wooden_spoon wool worm_fence wreck yawl yurt web_site comic_book crossword_puzzle street_sign traffic_light book_jacket menu plate guacamole consomme hot_pot trifle ice_cream ice_lolly French_loaf bagel pretzel cheeseburger hotdog mashed_potato head_cabbage broccoli cauliflower zucchini spaghetti_squash acorn_squash butternut_squash cucumber artichoke bell_pepper cardoon mushroom Granny_Smith strawberry orange lemon fig pineapple banana jackfruit custard_apple pomegranate hay carbonara chocolate_sauce dough meat_loaf pizza potpie burrito red_wine espresso cup eggnog alp bubble cliff coral_reef geyser lakeside promontory sandbar seashore valley volcano ballplayer groom scuba_diver rapeseed daisy yellow_lady's_slipper corn acorn hip buckeye coral_fungus agaric gyromitra stinkhorn earthstar hen-of-the-woods bolete ear toilet_tissue" />
<mean_values value="123.675 116.28 103.53" />
<model_short_name value="efficientnet_b1" />
<model_type value="Classification" />
<reverse_input_channels value="True" />
<scale_values value="58.395 57.12 57.375" />
</model_info>
</rt_info>
</net>
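<!--
  Editor's note (not part of the generated IR): the model_info block above records the
  preprocessing this classifier expects. Below is a minimal Python sketch of how those
  values are typically applied, wrapped in an XML comment so the file remains valid IR.
  Assumptions are flagged inline: the input image is a hypothetical stand-in, and reading
  reverse_input_channels=True as "flip a BGR frame to RGB before applying the RGB-order
  mean/scale" is an assumption, since the exact ordering depends on the consuming tool.

  import numpy as np
  import openvino as ov

  core = ov.Core()
  model = core.read_model("efficientnet_b1.xml")   # matching .bin is located by basename
  compiled = core.compile_model(model, "CPU")

  # Hypothetical stand-in for a real 240x240 BGR frame (e.g. from cv2.imread).
  frame = np.random.randint(0, 256, (240, 240, 3), dtype=np.uint8)

  rgb = frame[:, :, ::-1].astype(np.float32)       # reverse_input_channels=True (assumed BGR source)
  mean = np.array([123.675, 116.28, 103.53], np.float32)   # mean_values from model_info
  scale = np.array([58.395, 57.12, 57.375], np.float32)    # scale_values from model_info
  x = (rgb - mean) / scale                         # per-channel normalization
  x = x.transpose(2, 0, 1)[np.newaxis]             # HWC to NCHW: shape (1, 3, 240, 240)

  logits = compiled(x)[compiled.output(0)]         # single output; index into labels above
  print(int(np.argmax(logits)))
-->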