<?xml version="1.0"?>
<net name="Model0" version="11">
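<!--
  OpenVINO IR (version 11) of a RoBERTa-base text encoder exported from PyTorch
  (the aten:: layer names come from the PyTorch frontend): 12 attention heads of
  size 64, hidden size 768, vocab size 50265, 514 position slots, FP32 weights.
  The <data offset="..." size="..."/> attributes point into the companion .bin
  weights file that ships alongside this .xml.

  A minimal loading sketch, assuming this file is saved as openvino_model.xml
  with its matching openvino_model.bin in the same directory (file names and
  the toy token ids below are assumptions; produce real ids with the model's
  tokenizer):

    import numpy as np
    import openvino as ov

    core = ov.Core()
    # read_model picks up openvino_model.bin with the same base name
    model = core.read_model("openvino_model.xml")
    compiled = core.compile_model(model, "CPU")

    # Both graph inputs are i64 with dynamic [batch, sequence] shapes.
    input_ids = np.array([[0, 31414, 232, 2]], dtype=np.int64)
    attention_mask = np.ones_like(input_ids)
    result = compiled({"input_ids": input_ids, "attention_mask": attention_mask})
-->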
<layers>
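<!-- Graph inputs: input_ids and attention_mask, both i64 Parameters with fully
     dynamic [batch, sequence] shapes (shape="?,?", printed as dim -1 below). -->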
<layer id="1" name="input_ids" type="Parameter" version="opset1">
<data shape="?,?" element_type="i64" />
<output>
<port id="0" precision="I64" names="input_ids">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="0" name="attention_mask" type="Parameter" version="opset1">
<data shape="?,?" element_type="i64" />
<output>
<port id="0" precision="I64" names="attention_mask">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
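<!-- Word embeddings: the 50265 x 768 lookup table (layer 2), index conversion
     to i32 (layer 3), and a Gather (layer 5; its axis constant at layer 4 is
     presumably 0, as usual for an embedding lookup) producing inputs_embeds of
     shape [batch, sequence, 768]. -->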
<layer id="2" name="self.roberta.embeddings.word_embeddings.weight" type="Const" version="opset1">
<data element_type="f32" shape="50265, 768" offset="0" size="154414080" />
<output>
<port id="0" precision="FP32" names="self.roberta.embeddings.word_embeddings.weight">
<dim>50265</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="3" name="__module.roberta.embeddings.word_embeddings/aten::embedding/Convert" type="Convert" version="opset1">
<data destination_type="i32" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="4" name="__module.roberta.embeddings.word_embeddings/aten::embedding/Constant" type="Const" version="opset1">
<data element_type="i32" shape="" offset="154414080" size="4" />
<output>
<port id="0" precision="I32" />
</output>
</layer>
<layer id="5" name="__module.roberta.embeddings.word_embeddings/aten::embedding/Gather" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="FP32">
<dim>50265</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="2" precision="I32" />
</input>
<output>
<port id="3" precision="FP32" names="61,inputs_embeds">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
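<!-- Token-type embeddings: RoBERTa uses a single token type, so the table
     (layer 6) is just 1 x 768. Layers 7..28 slice a buffered token_type_ids
     row of length 514 down to the current sequence length and broadcast it to
     the batch; layers 29..31 gather the embeddings, and layer 32 adds them to
     the word embeddings. -->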
<layer id="6" name="self.roberta.embeddings.token_type_embeddings.weight" type="Const" version="opset1">
<data element_type="f32" shape="1, 768" offset="154414084" size="3072" />
<output>
<port id="0" precision="FP32" names="self.roberta.embeddings.token_type_embeddings.weight">
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="7" name="__module.roberta/aten::slice/Slice" type="Const" version="opset1">
<data element_type="i64" shape="1, 514" offset="154417156" size="4112" />
<output>
<port id="0" precision="I64" names="37">
<dim>1</dim>
<dim>514</dim>
</port>
</output>
</layer>
<layer id="8" name="Constant_6994" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="154421268" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="9" name="Constant_6995" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="154421268" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="10" name="Constant_6991" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="154421284" size="8" />
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="11" name="__module.roberta/aten::size/ShapeOf_1" type="ShapeOf" version="opset3">
<data output_type="i64" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="12" name="29" type="Const" version="opset1">
<data element_type="i64" shape="" offset="154421284" size="8" />
<output>
<port id="0" precision="I64" names="29" />
</output>
</layer>
<layer id="13" name="__module.roberta/aten::size/Constant" type="Const" version="opset1">
<data element_type="i32" shape="" offset="154414080" size="4" />
<output>
<port id="0" precision="I32" />
</output>
</layer>
<layer id="14" name="__module.roberta/aten::size/Gather" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="I64">
<dim>2</dim>
</port>
<port id="1" precision="I64" />
<port id="2" precision="I32" />
</input>
<output>
<port id="3" precision="I64" names="36" />
</output>
</layer>
<layer id="15" name="__module.roberta/aten::slice/Constant" type="Const" version="opset1">
<data element_type="i32" shape="" offset="154414080" size="4" />
<output>
<port id="0" precision="I32" />
</output>
</layer>
<layer id="16" name="__module.roberta/aten::slice/Unsqueeze_1" type="Unsqueeze" version="opset1">
<input>
<port id="0" precision="I64" />
<port id="1" precision="I32" />
</input>
<output>
<port id="2" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="17" name="Constant_6990" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="154414080" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="18" name="ScatterUpdate_6996" type="ScatterUpdate" version="opset3">
<input>
<port id="0" precision="I64">
<dim>2</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
<port id="2" precision="I64">
<dim>1</dim>
</port>
<port id="3" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="4" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="19" name="Constant_6999" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="154421292" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="20" name="__module.roberta/aten::slice/Slice_1" type="StridedSlice" version="opset1">
<data begin_mask="1, 0" end_mask="1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
<input>
<port id="0" precision="I64">
<dim>1</dim>
<dim>514</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
<port id="2" precision="I64">
<dim>2</dim>
</port>
<port id="3" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="4" precision="I64" names="38,buffered_token_type_ids">
<dim>1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="21" name="Constant_6216" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="154421308" size="8" />
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="22" name="__module.roberta/aten::size/Constant_1" type="Const" version="opset1">
<data element_type="i32" shape="" offset="154414080" size="4" />
<output>
<port id="0" precision="I32" />
</output>
</layer>
<layer id="23" name="__module.roberta/aten::size/Gather_1" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="I64">
<dim>2</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
<port id="2" precision="I32" />
</input>
<output>
<port id="3" precision="I64" names="35">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="24" name="Constant_10218" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="154421316" size="8" />
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="25" name="__module.roberta/prim::ListConstruct/Reshape_0" type="Reshape" version="opset1">
<data special_zero="false" />
<input>
<port id="0" precision="I64" />
<port id="1" precision="I64">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="26" name="__module.roberta/prim::ListConstruct/Concat" type="Concat" version="opset1">
<data axis="0" />
<input>
<port id="0" precision="I64">
<dim>1</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="I64" names="39">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="27" name="__module.roberta/aten::expand/Abs" type="Abs" version="opset1">
<input>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="28" name="__module.roberta/aten::expand/Broadcast" type="Broadcast" version="opset3">
<data mode="bidirectional" />
<input>
<port id="0" precision="I64">
<dim>1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="I64" names="40">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="29" name="__module.roberta.embeddings.token_type_embeddings/aten::embedding/Convert" type="Convert" version="opset1">
<data destination_type="i32" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="30" name="__module.roberta.embeddings.token_type_embeddings/aten::embedding/Constant" type="Const" version="opset1">
<data element_type="i32" shape="" offset="154414080" size="4" />
<output>
<port id="0" precision="I32" />
</output>
</layer>
<layer id="31" name="__module.roberta.embeddings.token_type_embeddings/aten::embedding/Gather" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="2" precision="I32" />
</input>
<output>
<port id="3" precision="FP32" names="63,token_type_embeddings.1">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="32" name="__module.roberta.embeddings/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="64_1">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
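<!-- Position embeddings (layers 33..44): RoBERTa-style position ids are
     computed from input_ids rather than passed in. A mask is built as
     input_ids != padding constant (layer 34, presumably the value 1), then
     position_ids = cumsum(mask) * mask + that constant, followed by a Gather
     from the 514 x 768 table. Layer 45 adds the result into the running
     embedding sum. -->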
<layer id="33" name="self.roberta.embeddings.position_embeddings.weight" type="Const" version="opset1">
<data element_type="f32" shape="514, 768" offset="154421324" size="1579008" />
<output>
<port id="0" precision="FP32" names="self.roberta.embeddings.position_embeddings.weight">
<dim>514</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="34" name="Constant_7503" type="Const" version="opset1">
<data element_type="i64" shape="1, 1" offset="154421284" size="8" />
<output>
<port id="0" precision="I64">
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="35" name="__module.roberta.embeddings/aten::ne/NotEqual" type="NotEqual" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="BOOL" names="52">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="36" name="__module.roberta.embeddings/aten::to/Convert" type="Convert" version="opset1">
<data destination_type="i32" />
<input>
<port id="0" precision="BOOL">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I32" names="53,mask">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="37" name="__module.roberta.embeddings/aten::cumsum/CumSum" type="CumSum" version="opset3">
<data exclusive="false" reverse="false" />
<input>
<port id="0" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="I64" />
</input>
<output>
<port id="2" precision="I32" names="54,55,56">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="38" name="__module.roberta.embeddings/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="I32" names="57,incremental_indices">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="39" name="__module.roberta.embeddings/aten::to/Convert_1" type="Convert" version="opset1">
<data destination_type="i64" />
<input>
<port id="0" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I64" names="58">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="40" name="Constant_7504" type="Const" version="opset1">
<data element_type="i64" shape="1, 1" offset="154421284" size="8" />
<output>
<port id="0" precision="I64">
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="41" name="__module.roberta.embeddings/aten::add/Add_2" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="I64" names="59">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="42" name="__module.roberta.embeddings.position_embeddings/aten::embedding/Convert" type="Convert" version="opset1">
<data destination_type="i32" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="43" name="__module.roberta.embeddings.position_embeddings/aten::embedding/Constant" type="Const" version="opset1">
<data element_type="i32" shape="" offset="154414080" size="4" />
<output>
<port id="0" precision="I32" />
</output>
</layer>
<layer id="44" name="__module.roberta.embeddings.position_embeddings/aten::embedding/Gather" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="FP32">
<dim>514</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="2" precision="I32" />
</input>
<output>
<port id="3" precision="FP32" names="66,position_embeddings.1">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="45" name="__module.roberta.embeddings/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="64,embeddings.1">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
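<!-- Embedding LayerNorm (layers 46..51): aten::layer_norm is decomposed into
     MVN over the last axis (eps approx 1e-05, INSIDE_SQRT mode), a Multiply by
     the learned scale, and an Add of the learned bias. The same three-op
     pattern recurs for every LayerNorm in the encoder. -->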
<layer id="46" name="__module.roberta.embeddings.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="47" name="__module.roberta.embeddings.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="48" name="Constant_7505" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="156000336" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="49" name="__module.roberta.embeddings.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="50" name="Constant_7506" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="156003408" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="51" name="__module.roberta.embeddings.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="71,input.1">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
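<!-- Encoder layer 0, self-attention. Linear projections are exported as
     MatMul with transpose_b="true" against a [768, 768] weight plus a bias
     Add; each result is reshaped to [batch, sequence, 12, 64] and transposed
     to [batch, 12, sequence, 64] (12 heads of size 64). The query branch comes
     first; the key and value branches below repeat the same pattern. -->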
<layer id="52" name="self.roberta.encoder.layer.0.attention.self.query.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="156006480" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.0.attention.self.query.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="53" name="__module.roberta.encoder.layer.0.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="54" name="Constant_7507" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="158365776" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="55" name="__module.roberta.encoder.layer.0.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="107,x.9">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="56" name="__module.roberta.encoder.layer.0.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="57" name="__module.roberta.encoder.layer.0.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="129,x.11">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="58" name="Constant_260" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="130">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="59" name="__module.roberta.encoder.layer.0.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="131">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="60" name="self.roberta.encoder.layer.0.attention.self.key.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="158368912" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.0.attention.self.key.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="61" name="__module.roberta.encoder.layer.0.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="62" name="Constant_7508" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="160728208" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="63" name="__module.roberta.encoder.layer.0.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="110,x.1">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="64" name="__module.roberta.encoder.layer.0.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="65" name="__module.roberta.encoder.layer.0.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="114,x.3">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="66" name="Constant_220" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="115">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="67" name="__module.roberta.encoder.layer.0.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="116,key_layer.1">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
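<!-- Raw attention scores: query x key^T via MatMul with transpose_b="true"
     (layer 68), then a Divide by a scalar constant (layer 69), presumably
     sqrt(head_dim) = 8. -->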
<layer id="68" name="__module.roberta.encoder.layer.0.attention.self/aten::matmul/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="133,attention_scores.1">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="69" name="Constant_7509" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731280" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="70" name="__module.roberta.encoder.layer.0.attention.self/aten::div/Divide" type="Divide" version="opset1">
<data auto_broadcast="numpy" m_pythondiv="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="134,attention_scores.3">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="71" name="Constant_7511" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731284" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
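<!-- Extended attention mask (layers 72..80): attention_mask is unsqueezed
     twice to [batch, 1, 1, sequence], converted to f32, inverted as
     (1 - mask), and multiplied by a constant (layer 79, presumably a large
     negative value) so padded positions receive a huge negative bias when
     added to the scores at layer 81, which the Softmax then drives to zero. -->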
<layer id="72" name="__module.roberta/aten::unsqueeze/Unsqueeze" type="Unsqueeze" version="opset1">
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="I64" />
</input>
<output>
<port id="2" precision="I64" names="42">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="73" name="26" type="Const" version="opset1">
<data element_type="i64" shape="" offset="160731288" size="8" />
<output>
<port id="0" precision="I64" names="26" />
</output>
</layer>
<layer id="74" name="__module.roberta/aten::unsqueeze/Unsqueeze_1" type="Unsqueeze" version="opset1">
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="I64" />
</input>
<output>
<port id="2" precision="I64" names="43,44,extended_attention_mask">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="75" name="__module.roberta/aten::to/Convert" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="45">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="76" name="Constant_7510" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731284" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="77" name="__module.roberta/aten::rsub/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="78" name="__module.roberta/aten::rsub/Subtract" type="Subtract" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="46">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="79" name="Constant_7512" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731296" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="80" name="__module.roberta/aten::mul/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="47,attention_mask">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="81" name="__module.roberta.encoder.layer.0.attention.self/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="135,input.3">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="82" name="__module.roberta.encoder.layer.0.attention.self/aten::softmax/Softmax" type="SoftMax" version="opset8">
<data axis="-1" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="136,input.5">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
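<!-- Context computation: the value projection below (layers 83..90) mirrors
     the query/key branches; the softmaxed probabilities are multiplied with it
     at layer 91 (probs x V), transposed back to [batch, sequence, 12, 64],
     and reshaped to [batch, sequence, 768]. -->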
<layer id="83" name="self.roberta.encoder.layer.0.attention.self.value.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="160731300" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.0.attention.self.value.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="84" name="__module.roberta.encoder.layer.0.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="85" name="Constant_7513" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="163090596" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="86" name="__module.roberta.encoder.layer.0.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="119,x.5">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="87" name="__module.roberta.encoder.layer.0.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="88" name="__module.roberta.encoder.layer.0.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="123,x.7">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="89" name="Constant_243" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="124">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="90" name="__module.roberta.encoder.layer.0.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="125">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="91" name="__module.roberta.encoder.layer.0.attention.self/aten::matmul/MatMul_1" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="138,context_layer.1">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="92" name="Constant_317" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="139">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="93" name="__module.roberta.encoder.layer.0.attention.self/aten::permute/Transpose_3" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="140">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="94" name="__module.roberta.encoder.layer.0.attention.self/prim::ListConstruct/Concat_3" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="163093668" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="95" name="__module.roberta.encoder.layer.0.attention.self/aten::view/Reshape_3" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="145">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
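<!-- Attention output block: a [768, 768] dense projection plus bias, a
     residual Add with the block input (layer 100), and LayerNorm
     (layers 101..106), again decomposed as MVN, Multiply, Add. -->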
<layer id="96" name="self.roberta.encoder.layer.0.attention.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="163093692" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.0.attention.output.dense.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="97" name="__module.roberta.encoder.layer.0.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="98" name="Constant_7514" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="165452988" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="99" name="__module.roberta.encoder.layer.0.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="150,input.7">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="100" name="__module.roberta.encoder.layer.0.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="152">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="101" name="__module.roberta.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="102" name="__module.roberta.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="103" name="Constant_7515" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="165456060" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="104" name="__module.roberta.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="105" name="Constant_7516" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="165459132" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="106" name="__module.roberta.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="156,input_tensor.3">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
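<!-- Feed-forward block: the intermediate dense expands 768 to 3072, Gelu with
     approximation_mode="ERF" applies the activation, and the output dense
     projects back to 768, followed by the residual Add and LayerNorm that
     close encoder layer 0. -->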
<layer id="107" name="self.roberta.encoder.layer.0.intermediate.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="3072, 768" offset="165462204" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.0.intermediate.dense.weight">
<dim>3072</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="108" name="__module.roberta.encoder.layer.0.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>3072</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="109" name="Constant_7517" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 3072" offset="174899388" size="12288" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="110" name="__module.roberta.encoder.layer.0.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="160">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="111" name="__module.roberta.encoder.layer.0.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="161">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="112" name="self.roberta.encoder.layer.0.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 3072" offset="174911676" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.0.output.dense.weight">
<dim>768</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="113" name="__module.roberta.encoder.layer.0.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="114" name="Constant_7518" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="184348860" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="115" name="__module.roberta.encoder.layer.0.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="166,input.9">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="116" name="__module.roberta.encoder.layer.0.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="168">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="117" name="__module.roberta.encoder.layer.0.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="118" name="__module.roberta.encoder.layer.0.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="119" name="Constant_7519" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="184351932" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="120" name="__module.roberta.encoder.layer.0.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="121" name="Constant_7520" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="184355004" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="122" name="__module.roberta.encoder.layer.0.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="172,input_tensor.5">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
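<!-- Encoder layer 1 repeats the same attention and feed-forward structure
     with its own weight constants; the remaining layers of the stack
     (presumably 12 in total for a base-size model) and the file's <edges>
     section continue past the point where this excerpt is truncated. -->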
<layer id="123" name="self.roberta.encoder.layer.1.attention.self.query.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="184358076" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.1.attention.self.query.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="124" name="__module.roberta.encoder.layer.1.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="125" name="Constant_7521" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="186717372" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="126" name="__module.roberta.encoder.layer.1.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="183,x.21">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="127" name="__module.roberta.encoder.layer.1.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="128" name="__module.roberta.encoder.layer.1.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="205,x.23">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="129" name="Constant_465" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="206">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="130" name="__module.roberta.encoder.layer.1.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="207">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="131" name="self.roberta.encoder.layer.1.attention.self.key.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="186720444" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.1.attention.self.key.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="132" name="__module.roberta.encoder.layer.1.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="133" name="Constant_7522" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="189079740" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="134" name="__module.roberta.encoder.layer.1.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="186,x.13">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="135" name="__module.roberta.encoder.layer.1.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="136" name="__module.roberta.encoder.layer.1.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="190,x.15">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="137" name="Constant_425" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="191">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="138" name="__module.roberta.encoder.layer.1.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="192,key_layer.3">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="139" name="__module.roberta.encoder.layer.1.attention.self/aten::matmul/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="209,attention_scores.5">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="140" name="Constant_7523" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731280" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="141" name="__module.roberta.encoder.layer.1.attention.self/aten::div/Divide" type="Divide" version="opset1">
<data auto_broadcast="numpy" m_pythondiv="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="210,attention_scores.7">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="142" name="__module.roberta.encoder.layer.1.attention.self/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="211,input.11">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="143" name="__module.roberta.encoder.layer.1.attention.self/aten::softmax/Softmax" type="SoftMax" version="opset8">
<data axis="-1" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="212,input.13">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="144" name="self.roberta.encoder.layer.1.attention.self.value.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="189082812" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.1.attention.self.value.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="145" name="__module.roberta.encoder.layer.1.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="146" name="Constant_7524" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="191442108" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="147" name="__module.roberta.encoder.layer.1.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="195,x.17">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="148" name="__module.roberta.encoder.layer.1.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="149" name="__module.roberta.encoder.layer.1.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="199,x.19">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="150" name="Constant_448" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="200">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="151" name="__module.roberta.encoder.layer.1.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="201">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="152" name="__module.roberta.encoder.layer.1.attention.self/aten::matmul/MatMul_1" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="214,context_layer.5">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="153" name="Constant_522" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="215">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="154" name="__module.roberta.encoder.layer.1.attention.self/aten::permute/Transpose_3" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="216">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="155" name="__module.roberta.encoder.layer.1.attention.self/prim::ListConstruct/Concat_3" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="163093668" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="156" name="__module.roberta.encoder.layer.1.attention.self/aten::view/Reshape_3" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="221">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="157" name="self.roberta.encoder.layer.1.attention.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="191445180" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.1.attention.output.dense.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="158" name="__module.roberta.encoder.layer.1.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="159" name="Constant_7525" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="193804476" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="160" name="__module.roberta.encoder.layer.1.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="226,input.15">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="161" name="__module.roberta.encoder.layer.1.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="228">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="162" name="__module.roberta.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="163" name="__module.roberta.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="164" name="Constant_7526" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="193807548" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="165" name="__module.roberta.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="166" name="Constant_7527" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="193810620" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="167" name="__module.roberta.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="232,input_tensor.7">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
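		<!-- Post-attention residual add plus LayerNorm, decomposed here as MVN over
		     the last axis followed by a per-channel scale (gamma) and shift (beta);
		     eps is 1e-5 up to float32 rounding. -->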
<layer id="168" name="self.roberta.encoder.layer.1.intermediate.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="3072, 768" offset="193813692" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.1.intermediate.dense.weight">
<dim>3072</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="169" name="__module.roberta.encoder.layer.1.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>3072</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="170" name="Constant_7528" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 3072" offset="203250876" size="12288" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="171" name="__module.roberta.encoder.layer.1.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="236">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="172" name="__module.roberta.encoder.layer.1.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="237">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
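		<!-- Feed-forward block: expand 768 to 3072, add bias, then exact erf-based
		     GELU: gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))). -->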
<layer id="173" name="self.roberta.encoder.layer.1.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 3072" offset="203263164" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.1.output.dense.weight">
<dim>768</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="174" name="__module.roberta.encoder.layer.1.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="175" name="Constant_7529" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="212700348" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="176" name="__module.roberta.encoder.layer.1.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="242,input.17">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="177" name="__module.roberta.encoder.layer.1.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="244">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="178" name="__module.roberta.encoder.layer.1.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="179" name="__module.roberta.encoder.layer.1.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="180" name="Constant_7530" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="212703420" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="181" name="__module.roberta.encoder.layer.1.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="182" name="Constant_7531" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="212706492" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="183" name="__module.roberta.encoder.layer.1.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="248,input_tensor.9">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
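		<!-- End of encoder layer 1. The remaining encoder layers below serialize the
		     identical subgraph with their own weight constants (only the .bin offsets
		     differ). For reference, each layer computes roughly the following
		     PyTorch-style pseudocode; this is a sketch inferred from the ops and
		     tensor shapes, and the names (x, Wq, bq, ...) are illustrative, not part
		     of the IR:

		       q = x @ Wq.T + bq                            # [B, S, 768]
		       k = x @ Wk.T + bk
		       v = x @ Wv.T + bv
		       q, k, v = [t.reshape(B, S, 12, 64).permute(0, 2, 1, 3) for t in (q, k, v)]
		       scores = (q @ k.transpose(2, 3)) / scale     # scale read from .bin, presumably sqrt(64)
		       probs  = softmax(scores + mask, axis=3)      # mask broadcast from [B, 1, 1, S]
		       ctx    = (probs @ v).permute(0, 2, 1, 3).reshape(B, S, 768)
		       attn   = layer_norm(x + ctx @ Wo.T + bo)
		       out    = layer_norm(attn + gelu(attn @ Wi.T + bi) @ Wd.T + bd)
		-->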
<layer id="184" name="self.roberta.encoder.layer.2.attention.self.query.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="212709564" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.2.attention.self.query.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="185" name="__module.roberta.encoder.layer.2.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="186" name="Constant_7532" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="215068860" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="187" name="__module.roberta.encoder.layer.2.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="259,x.33">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="188" name="__module.roberta.encoder.layer.2.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="189" name="__module.roberta.encoder.layer.2.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="281,x.35">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="190" name="Constant_670" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="282">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="191" name="__module.roberta.encoder.layer.2.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="283">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
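		<!-- Encoder layer 2 self-attention: Q, K and V are each 768 to 768 linear
		     projections (MatMul with transpose_b plus bias Add), viewed as 12 heads
		     of size 64 and permuted to [batch, heads, seq, head_dim]. -->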
<layer id="192" name="self.roberta.encoder.layer.2.attention.self.key.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="215071932" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.2.attention.self.key.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="193" name="__module.roberta.encoder.layer.2.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="194" name="Constant_7533" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="217431228" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="195" name="__module.roberta.encoder.layer.2.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="262,x.25">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="196" name="__module.roberta.encoder.layer.2.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="197" name="__module.roberta.encoder.layer.2.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="266,x.27">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="198" name="Constant_630" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="267">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="199" name="__module.roberta.encoder.layer.2.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="268,key_layer.5">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="200" name="__module.roberta.encoder.layer.2.attention.self/aten::matmul/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="285,attention_scores.9">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
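		<!-- Raw attention scores: MatMul with transpose_b=true computes Q times K
		     transposed, giving [batch, 12, seq_q, seq_k]. -->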
<layer id="201" name="Constant_7534" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731280" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="202" name="__module.roberta.encoder.layer.2.attention.self/aten::div/Divide" type="Divide" version="opset1">
<data auto_broadcast="numpy" m_pythondiv="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="286,attention_scores.11">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
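		<!-- Scores scaled by a 1x1x1x1 scalar constant shared across layers (the same
		     .bin offset 160731280 appears in every layer; presumably sqrt(head_dim) = 8). -->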
<layer id="203" name="__module.roberta.encoder.layer.2.attention.self/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="287,input.19">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="204" name="__module.roberta.encoder.layer.2.attention.self/aten::softmax/Softmax" type="SoftMax" version="opset8">
<data axis="-1" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="288,input.21">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
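		<!-- The broadcast Add above folds the [batch, 1, 1, seq] attention mask into
		     the scores (typically large negative values at padded positions) before
		     the row-wise softmax along the last axis. -->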
<layer id="205" name="self.roberta.encoder.layer.2.attention.self.value.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="217434300" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.2.attention.self.value.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="206" name="__module.roberta.encoder.layer.2.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="207" name="Constant_7535" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="219793596" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="208" name="__module.roberta.encoder.layer.2.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="271,x.29">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="209" name="__module.roberta.encoder.layer.2.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="210" name="__module.roberta.encoder.layer.2.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="275,x.31">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="211" name="Constant_653" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="276">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="212" name="__module.roberta.encoder.layer.2.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="277">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="213" name="__module.roberta.encoder.layer.2.attention.self/aten::matmul/MatMul_1" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="290,context_layer.9">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="214" name="Constant_727" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="291">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="215" name="__module.roberta.encoder.layer.2.attention.self/aten::permute/Transpose_3" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="292">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="216" name="__module.roberta.encoder.layer.2.attention.self/prim::ListConstruct/Concat_3" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="163093668" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="217" name="__module.roberta.encoder.layer.2.attention.self/aten::view/Reshape_3" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="297">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="218" name="self.roberta.encoder.layer.2.attention.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="219796668" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.2.attention.output.dense.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="219" name="__module.roberta.encoder.layer.2.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="220" name="Constant_7536" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="222155964" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="221" name="__module.roberta.encoder.layer.2.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="302,input.23">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="222" name="__module.roberta.encoder.layer.2.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="304">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="223" name="__module.roberta.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="224" name="__module.roberta.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="225" name="Constant_7537" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="222159036" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="226" name="__module.roberta.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="227" name="Constant_7538" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="222162108" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="228" name="__module.roberta.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="308,input_tensor.11">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="229" name="self.roberta.encoder.layer.2.intermediate.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="3072, 768" offset="222165180" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.2.intermediate.dense.weight">
<dim>3072</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="230" name="__module.roberta.encoder.layer.2.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>3072</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="231" name="Constant_7539" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 3072" offset="231602364" size="12288" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="232" name="__module.roberta.encoder.layer.2.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="312">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="233" name="__module.roberta.encoder.layer.2.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="313">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="234" name="self.roberta.encoder.layer.2.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 3072" offset="231614652" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.2.output.dense.weight">
<dim>768</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="235" name="__module.roberta.encoder.layer.2.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="236" name="Constant_7540" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="241051836" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="237" name="__module.roberta.encoder.layer.2.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="318,input.25">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="238" name="__module.roberta.encoder.layer.2.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="320">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="239" name="__module.roberta.encoder.layer.2.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="240" name="__module.roberta.encoder.layer.2.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="241" name="Constant_7541" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="241054908" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="242" name="__module.roberta.encoder.layer.2.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="243" name="Constant_7542" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="241057980" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="244" name="__module.roberta.encoder.layer.2.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="324,input_tensor.13">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
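		<!-- End of encoder layer 2; encoder layer 3 repeats the same pattern below. -->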
<layer id="245" name="self.roberta.encoder.layer.3.attention.self.query.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="241061052" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.3.attention.self.query.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="246" name="__module.roberta.encoder.layer.3.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="247" name="Constant_7543" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="243420348" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="248" name="__module.roberta.encoder.layer.3.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="335,x.45">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="249" name="__module.roberta.encoder.layer.3.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="250" name="__module.roberta.encoder.layer.3.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="357,x.47">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="251" name="Constant_875" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="358">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="252" name="__module.roberta.encoder.layer.3.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="359">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="253" name="self.roberta.encoder.layer.3.attention.self.key.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="243423420" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.3.attention.self.key.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="254" name="__module.roberta.encoder.layer.3.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="255" name="Constant_7544" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="245782716" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="256" name="__module.roberta.encoder.layer.3.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="338,x.37">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="257" name="__module.roberta.encoder.layer.3.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="258" name="__module.roberta.encoder.layer.3.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="342,x.39">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="259" name="Constant_835" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="343">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="260" name="__module.roberta.encoder.layer.3.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="344,key_layer.7">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="261" name="__module.roberta.encoder.layer.3.attention.self/aten::matmul/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="361,attention_scores.13">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="262" name="Constant_7545" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731280" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="263" name="__module.roberta.encoder.layer.3.attention.self/aten::div/Divide" type="Divide" version="opset1">
<data auto_broadcast="numpy" m_pythondiv="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="362,attention_scores.15">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="264" name="__module.roberta.encoder.layer.3.attention.self/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="363,input.27">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="265" name="__module.roberta.encoder.layer.3.attention.self/aten::softmax/Softmax" type="SoftMax" version="opset8">
<data axis="-1" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="364,input.29">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="266" name="self.roberta.encoder.layer.3.attention.self.value.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="245785788" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.3.attention.self.value.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="267" name="__module.roberta.encoder.layer.3.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="268" name="Constant_7546" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="248145084" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="269" name="__module.roberta.encoder.layer.3.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="347,x.41">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="270" name="__module.roberta.encoder.layer.3.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="271" name="__module.roberta.encoder.layer.3.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="351,x.43">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="272" name="Constant_858" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="352">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="273" name="__module.roberta.encoder.layer.3.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="353">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="274" name="__module.roberta.encoder.layer.3.attention.self/aten::matmul/MatMul_1" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="366,context_layer.13">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="275" name="Constant_932" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="367">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="276" name="__module.roberta.encoder.layer.3.attention.self/aten::permute/Transpose_3" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="368">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="277" name="__module.roberta.encoder.layer.3.attention.self/prim::ListConstruct/Concat_3" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="163093668" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="278" name="__module.roberta.encoder.layer.3.attention.self/aten::view/Reshape_3" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="373">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="279" name="self.roberta.encoder.layer.3.attention.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="248148156" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.3.attention.output.dense.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="280" name="__module.roberta.encoder.layer.3.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="281" name="Constant_7547" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="250507452" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="282" name="__module.roberta.encoder.layer.3.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="378,input.31">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="283" name="__module.roberta.encoder.layer.3.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="380">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="284" name="__module.roberta.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="285" name="__module.roberta.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="286" name="Constant_7548" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="250510524" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="287" name="__module.roberta.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="288" name="Constant_7549" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="250513596" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="289" name="__module.roberta.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="384,input_tensor.15">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="290" name="self.roberta.encoder.layer.3.intermediate.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="3072, 768" offset="250516668" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.3.intermediate.dense.weight">
<dim>3072</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="291" name="__module.roberta.encoder.layer.3.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>3072</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="292" name="Constant_7550" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 3072" offset="259953852" size="12288" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="293" name="__module.roberta.encoder.layer.3.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="388">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="294" name="__module.roberta.encoder.layer.3.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="389">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="295" name="self.roberta.encoder.layer.3.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 3072" offset="259966140" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.3.output.dense.weight">
<dim>768</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="296" name="__module.roberta.encoder.layer.3.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="297" name="Constant_7551" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="269403324" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="298" name="__module.roberta.encoder.layer.3.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="394,input.33">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="299" name="__module.roberta.encoder.layer.3.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="396">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="300" name="__module.roberta.encoder.layer.3.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="301" name="__module.roberta.encoder.layer.3.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="302" name="Constant_7552" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="269406396" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="303" name="__module.roberta.encoder.layer.3.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="304" name="Constant_7553" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="269409468" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="305" name="__module.roberta.encoder.layer.3.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="400,input_tensor.17">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
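		<!-- End of encoder layer 3; encoder layer 4 begins below. -->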
<layer id="306" name="self.roberta.encoder.layer.4.attention.self.query.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="269412540" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.4.attention.self.query.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="307" name="__module.roberta.encoder.layer.4.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="308" name="Constant_7554" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="271771836" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="309" name="__module.roberta.encoder.layer.4.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="411,x.57">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="310" name="__module.roberta.encoder.layer.4.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="311" name="__module.roberta.encoder.layer.4.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="433,x.59">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="312" name="Constant_1080" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="434">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="313" name="__module.roberta.encoder.layer.4.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="435">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="314" name="self.roberta.encoder.layer.4.attention.self.key.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="271774908" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.4.attention.self.key.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="315" name="__module.roberta.encoder.layer.4.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="316" name="Constant_7555" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="274134204" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="317" name="__module.roberta.encoder.layer.4.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="414,x.49">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="318" name="__module.roberta.encoder.layer.4.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="319" name="__module.roberta.encoder.layer.4.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="418,x.51">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="320" name="Constant_1040" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="419">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="321" name="__module.roberta.encoder.layer.4.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="420,key_layer.9">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
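		<!-- Attention scores: Q x K^T (MatMul with transpose_b=true), Divide by a scalar
		     constant (presumably sqrt(head size) = 8, as in standard RoBERTa), Add of the
		     broadcast attention mask shaped [-1, 1, 1, -1], then SoftMax over the last
		     axis. -->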
<layer id="322" name="__module.roberta.encoder.layer.4.attention.self/aten::matmul/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="437,attention_scores.17">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="323" name="Constant_7556" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731280" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="324" name="__module.roberta.encoder.layer.4.attention.self/aten::div/Divide" type="Divide" version="opset1">
<data auto_broadcast="numpy" m_pythondiv="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="438,attention_scores.19">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="325" name="__module.roberta.encoder.layer.4.attention.self/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="439,input.35">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="326" name="__module.roberta.encoder.layer.4.attention.self/aten::softmax/Softmax" type="SoftMax" version="opset8">
<data axis="-1" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="440,input.37">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
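		<!-- The softmax probabilities feed the value path directly; the attention-dropout
		     module of the original PyTorch model appears to be folded out of this
		     inference-only export. -->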
<layer id="327" name="self.roberta.encoder.layer.4.attention.self.value.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="274137276" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.4.attention.self.value.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="328" name="__module.roberta.encoder.layer.4.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="329" name="Constant_7557" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="276496572" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="330" name="__module.roberta.encoder.layer.4.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="423,x.53">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="331" name="__module.roberta.encoder.layer.4.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="332" name="__module.roberta.encoder.layer.4.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="427,x.55">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="333" name="Constant_1063" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="428">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="334" name="__module.roberta.encoder.layer.4.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="429">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
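		<!-- Context: probabilities x V ([batch, 12, seq, seq] x [batch, 12, seq, 64]),
		     Transpose back to [batch, seq, 12, 64], then Reshape through a 3-element shape
		     constant (presumably [0, 0, 768]) to merge the heads back into the hidden
		     size. -->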
<layer id="335" name="__module.roberta.encoder.layer.4.attention.self/aten::matmul/MatMul_1" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="442,context_layer.17">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="336" name="Constant_1137" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="443">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="337" name="__module.roberta.encoder.layer.4.attention.self/aten::permute/Transpose_3" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="444">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="338" name="__module.roberta.encoder.layer.4.attention.self/prim::ListConstruct/Concat_3" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="163093668" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="339" name="__module.roberta.encoder.layer.4.attention.self/aten::view/Reshape_3" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="449">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="340" name="self.roberta.encoder.layer.4.attention.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="276499644" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.4.attention.output.dense.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="341" name="__module.roberta.encoder.layer.4.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="342" name="Constant_7558" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="278858940" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="343" name="__module.roberta.encoder.layer.4.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="454,input.39">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="344" name="__module.roberta.encoder.layer.4.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="456">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
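		<!-- LayerNorm is lowered to MVN (mean/variance normalization over the last axis,
		     eps ~ 1e-05, INSIDE_SQRT) followed by elementwise Multiply (gamma) and
		     Add (beta), applied to the residual sum above. -->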
<layer id="345" name="__module.roberta.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="346" name="__module.roberta.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="347" name="Constant_7559" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="278862012" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="348" name="__module.roberta.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="349" name="Constant_7560" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="278865084" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="350" name="__module.roberta.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="460,input_tensor.19">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
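		<!-- Feed-forward block: Linear 768 -> 3072 with bias, exact-erf Gelu,
		     Linear 3072 -> 768 with bias, residual Add with input_tensor.19, and the same
		     MVN-based LayerNorm. -->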
<layer id="351" name="self.roberta.encoder.layer.4.intermediate.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="3072, 768" offset="278868156" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.4.intermediate.dense.weight">
<dim>3072</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="352" name="__module.roberta.encoder.layer.4.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>3072</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="353" name="Constant_7561" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 3072" offset="288305340" size="12288" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="354" name="__module.roberta.encoder.layer.4.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="464">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="355" name="__module.roberta.encoder.layer.4.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="465">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="356" name="self.roberta.encoder.layer.4.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 3072" offset="288317628" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.4.output.dense.weight">
<dim>768</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="357" name="__module.roberta.encoder.layer.4.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="358" name="Constant_7562" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="297754812" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="359" name="__module.roberta.encoder.layer.4.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="470,input.41">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="360" name="__module.roberta.encoder.layer.4.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="472">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="361" name="__module.roberta.encoder.layer.4.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="362" name="__module.roberta.encoder.layer.4.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="363" name="Constant_7563" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="297757884" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="364" name="__module.roberta.encoder.layer.4.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="365" name="Constant_7564" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="297760956" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="366" name="__module.roberta.encoder.layer.4.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="476,input_tensor.21">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
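		<!-- Encoder layer 5 (layer ids 367-427): identical topology to layer 4; only the
		     weight/bias Const offsets advance, by roughly 7.1M parameters (~28.3 MB of
		     FP32 data) per block. -->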
<layer id="367" name="self.roberta.encoder.layer.5.attention.self.query.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="297764028" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.5.attention.self.query.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="368" name="__module.roberta.encoder.layer.5.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="369" name="Constant_7565" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="300123324" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="370" name="__module.roberta.encoder.layer.5.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="487,x.69">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="371" name="__module.roberta.encoder.layer.5.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="372" name="__module.roberta.encoder.layer.5.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="509,x.71">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="373" name="Constant_1285" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="510">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="374" name="__module.roberta.encoder.layer.5.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="511">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="375" name="self.roberta.encoder.layer.5.attention.self.key.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="300126396" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.5.attention.self.key.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="376" name="__module.roberta.encoder.layer.5.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="377" name="Constant_7566" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="302485692" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="378" name="__module.roberta.encoder.layer.5.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="490,x.61">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="379" name="__module.roberta.encoder.layer.5.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="380" name="__module.roberta.encoder.layer.5.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="494,x.63">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="381" name="Constant_1245" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="495">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="382" name="__module.roberta.encoder.layer.5.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="496,key_layer.11">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="383" name="__module.roberta.encoder.layer.5.attention.self/aten::matmul/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="513,attention_scores.21">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="384" name="Constant_7567" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731280" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="385" name="__module.roberta.encoder.layer.5.attention.self/aten::div/Divide" type="Divide" version="opset1">
<data auto_broadcast="numpy" m_pythondiv="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="514,attention_scores.23">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="386" name="__module.roberta.encoder.layer.5.attention.self/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="515,input.43">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="387" name="__module.roberta.encoder.layer.5.attention.self/aten::softmax/Softmax" type="SoftMax" version="opset8">
<data axis="-1" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="516,input.45">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="388" name="self.roberta.encoder.layer.5.attention.self.value.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="302488764" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.5.attention.self.value.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="389" name="__module.roberta.encoder.layer.5.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="390" name="Constant_7568" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="304848060" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="391" name="__module.roberta.encoder.layer.5.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="499,x.65">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="392" name="__module.roberta.encoder.layer.5.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="393" name="__module.roberta.encoder.layer.5.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="503,x.67">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="394" name="Constant_1268" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="504">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="395" name="__module.roberta.encoder.layer.5.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="505">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="396" name="__module.roberta.encoder.layer.5.attention.self/aten::matmul/MatMul_1" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="518,context_layer.21">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="397" name="Constant_1342" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="519">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="398" name="__module.roberta.encoder.layer.5.attention.self/aten::permute/Transpose_3" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="520">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="399" name="__module.roberta.encoder.layer.5.attention.self/prim::ListConstruct/Concat_3" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="163093668" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="400" name="__module.roberta.encoder.layer.5.attention.self/aten::view/Reshape_3" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="525">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="401" name="self.roberta.encoder.layer.5.attention.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="304851132" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.5.attention.output.dense.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="402" name="__module.roberta.encoder.layer.5.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="403" name="Constant_7569" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="307210428" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="404" name="__module.roberta.encoder.layer.5.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="530,input.47">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="405" name="__module.roberta.encoder.layer.5.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="532">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="406" name="__module.roberta.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="407" name="__module.roberta.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="408" name="Constant_7570" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="307213500" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="409" name="__module.roberta.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="410" name="Constant_7571" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="307216572" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="411" name="__module.roberta.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="536,input_tensor.23">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="412" name="self.roberta.encoder.layer.5.intermediate.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="3072, 768" offset="307219644" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.5.intermediate.dense.weight">
<dim>3072</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="413" name="__module.roberta.encoder.layer.5.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>3072</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="414" name="Constant_7572" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 3072" offset="316656828" size="12288" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="415" name="__module.roberta.encoder.layer.5.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="540">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="416" name="__module.roberta.encoder.layer.5.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="541">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="417" name="self.roberta.encoder.layer.5.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 3072" offset="316669116" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.5.output.dense.weight">
<dim>768</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="418" name="__module.roberta.encoder.layer.5.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="419" name="Constant_7573" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="326106300" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="420" name="__module.roberta.encoder.layer.5.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="546,input.49">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="421" name="__module.roberta.encoder.layer.5.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="548">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="422" name="__module.roberta.encoder.layer.5.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="423" name="__module.roberta.encoder.layer.5.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="424" name="Constant_7574" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="326109372" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="425" name="__module.roberta.encoder.layer.5.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="426" name="Constant_7575" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="326112444" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="427" name="__module.roberta.encoder.layer.5.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="552,input_tensor.25">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
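		<!-- Encoder layer 6 (layer ids 428 onward): the same attention/FFN pattern
		     repeats. Note the shape constants (offsets 158368848, 158368880, 163093668)
		     and the score divisor (offset 160731280) are shared across all encoder
		     layers. -->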
<layer id="428" name="self.roberta.encoder.layer.6.attention.self.query.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="326115516" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.6.attention.self.query.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="429" name="__module.roberta.encoder.layer.6.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="430" name="Constant_7576" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="328474812" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="431" name="__module.roberta.encoder.layer.6.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="563,x.81">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="432" name="__module.roberta.encoder.layer.6.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="433" name="__module.roberta.encoder.layer.6.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="585,x.83">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="434" name="Constant_1490" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="586">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="435" name="__module.roberta.encoder.layer.6.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="587">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="436" name="self.roberta.encoder.layer.6.attention.self.key.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="328477884" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.6.attention.self.key.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="437" name="__module.roberta.encoder.layer.6.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="438" name="Constant_7577" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="330837180" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="439" name="__module.roberta.encoder.layer.6.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="566,x.73">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="440" name="__module.roberta.encoder.layer.6.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="441" name="__module.roberta.encoder.layer.6.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="570,x.75">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="442" name="Constant_1450" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="571">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="443" name="__module.roberta.encoder.layer.6.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="572,key_layer.13">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="444" name="__module.roberta.encoder.layer.6.attention.self/aten::matmul/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="589,attention_scores.25">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="445" name="Constant_7578" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731280" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="446" name="__module.roberta.encoder.layer.6.attention.self/aten::div/Divide" type="Divide" version="opset1">
<data auto_broadcast="numpy" m_pythondiv="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="590,attention_scores.27">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="447" name="__module.roberta.encoder.layer.6.attention.self/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="591,input.51">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="448" name="__module.roberta.encoder.layer.6.attention.self/aten::softmax/Softmax" type="SoftMax" version="opset8">
<data axis="-1" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="592,input.53">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="449" name="self.roberta.encoder.layer.6.attention.self.value.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="330840252" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.6.attention.self.value.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="450" name="__module.roberta.encoder.layer.6.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="451" name="Constant_7579" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="333199548" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="452" name="__module.roberta.encoder.layer.6.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="575,x.77">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="453" name="__module.roberta.encoder.layer.6.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="454" name="__module.roberta.encoder.layer.6.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="579,x.79">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="455" name="Constant_1473" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="580">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="456" name="__module.roberta.encoder.layer.6.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="581">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="457" name="__module.roberta.encoder.layer.6.attention.self/aten::matmul/MatMul_1" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="594,context_layer.25">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="458" name="Constant_1547" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="595">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="459" name="__module.roberta.encoder.layer.6.attention.self/aten::permute/Transpose_3" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="596">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="460" name="__module.roberta.encoder.layer.6.attention.self/prim::ListConstruct/Concat_3" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="163093668" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="461" name="__module.roberta.encoder.layer.6.attention.self/aten::view/Reshape_3" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="601">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
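<!-- Per-head context merged back to [batch, seq, 768]; next comes the attention output block: a 768x768 projection with bias, a residual Add with the block input, and LayerNorm decomposed as MVN (eps ~1e-5) followed by learned scale (Multiply) and shift (Add) constants. -->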
<layer id="462" name="self.roberta.encoder.layer.6.attention.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="333202620" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.6.attention.output.dense.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="463" name="__module.roberta.encoder.layer.6.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="464" name="Constant_7580" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="335561916" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="465" name="__module.roberta.encoder.layer.6.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="606,input.55">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="466" name="__module.roberta.encoder.layer.6.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="608">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="467" name="__module.roberta.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="468" name="__module.roberta.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="469" name="Constant_7581" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="335564988" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="470" name="__module.roberta.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="471" name="Constant_7582" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="335568060" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="472" name="__module.roberta.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="612,input_tensor.27">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
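<!-- Feed-forward block of layer 6: intermediate dense expands 768 -> 3072, GELU uses the exact ERF formulation, the output dense projects 3072 -> 768, then residual Add and a second LayerNorm close the layer. -->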
<layer id="473" name="self.roberta.encoder.layer.6.intermediate.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="3072, 768" offset="335571132" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.6.intermediate.dense.weight">
<dim>3072</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="474" name="__module.roberta.encoder.layer.6.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>3072</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="475" name="Constant_7583" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 3072" offset="345008316" size="12288" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="476" name="__module.roberta.encoder.layer.6.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="616">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="477" name="__module.roberta.encoder.layer.6.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="617">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="478" name="self.roberta.encoder.layer.6.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 3072" offset="345020604" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.6.output.dense.weight">
<dim>768</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="479" name="__module.roberta.encoder.layer.6.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="480" name="Constant_7584" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="354457788" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="481" name="__module.roberta.encoder.layer.6.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="622,input.57">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="482" name="__module.roberta.encoder.layer.6.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="624">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="483" name="__module.roberta.encoder.layer.6.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="484" name="__module.roberta.encoder.layer.6.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="485" name="Constant_7585" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="354460860" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="486" name="__module.roberta.encoder.layer.6.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="487" name="Constant_7586" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="354463932" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="488" name="__module.roberta.encoder.layer.6.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="628,input_tensor.29">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
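<!-- End of encoder layer 6. Encoder layer 7 below repeats the same subgraph with its own weight/bias constants; only the .bin offsets differ. -->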
<layer id="489" name="self.roberta.encoder.layer.7.attention.self.query.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="354467004" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.7.attention.self.query.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="490" name="__module.roberta.encoder.layer.7.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="491" name="Constant_7587" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="356826300" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="492" name="__module.roberta.encoder.layer.7.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="639,x.93">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="493" name="__module.roberta.encoder.layer.7.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="494" name="__module.roberta.encoder.layer.7.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="661,x.95">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="495" name="Constant_1695" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="662">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="496" name="__module.roberta.encoder.layer.7.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="663">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="497" name="self.roberta.encoder.layer.7.attention.self.key.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="356829372" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.7.attention.self.key.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="498" name="__module.roberta.encoder.layer.7.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="499" name="Constant_7588" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="359188668" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="500" name="__module.roberta.encoder.layer.7.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="642,x.85">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="501" name="__module.roberta.encoder.layer.7.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="502" name="__module.roberta.encoder.layer.7.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="646,x.87">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="503" name="Constant_1655" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="647">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="504" name="__module.roberta.encoder.layer.7.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="648,key_layer.15">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
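<!-- Scaled dot-product attention for layer 7: MatMul with transpose_b computes Q.K^T; Divide scales by a scalar constant (presumably sqrt(64) = 8 for 64-dim heads, per standard RoBERTa; the value sits at .bin offset 160731280); Add applies the broadcast [batch, 1, 1, seq] attention mask; SoftMax normalizes over the last axis. -->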
<layer id="505" name="__module.roberta.encoder.layer.7.attention.self/aten::matmul/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="665,attention_scores.29">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="506" name="Constant_7589" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731280" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="507" name="__module.roberta.encoder.layer.7.attention.self/aten::div/Divide" type="Divide" version="opset1">
<data auto_broadcast="numpy" m_pythondiv="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="666,attention_scores.31">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="508" name="__module.roberta.encoder.layer.7.attention.self/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="667,input.59">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="509" name="__module.roberta.encoder.layer.7.attention.self/aten::softmax/Softmax" type="SoftMax" version="opset8">
<data axis="-1" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="668,input.61">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
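<!-- Value path for layer 7: mirrors the query/key projections; MatMul_1 then multiplies the attention probabilities by V to produce the per-head context tensor. -->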
<layer id="510" name="self.roberta.encoder.layer.7.attention.self.value.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="359191740" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.7.attention.self.value.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="511" name="__module.roberta.encoder.layer.7.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="512" name="Constant_7590" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="361551036" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="513" name="__module.roberta.encoder.layer.7.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="651,x.89">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="514" name="__module.roberta.encoder.layer.7.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="515" name="__module.roberta.encoder.layer.7.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="655,x.91">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="516" name="Constant_1678" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="656">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="517" name="__module.roberta.encoder.layer.7.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="657">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="518" name="__module.roberta.encoder.layer.7.attention.self/aten::matmul/MatMul_1" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="670,context_layer.29">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="519" name="Constant_1752" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="671">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="520" name="__module.roberta.encoder.layer.7.attention.self/aten::permute/Transpose_3" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="672">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="521" name="__module.roberta.encoder.layer.7.attention.self/prim::ListConstruct/Concat_3" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="163093668" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="522" name="__module.roberta.encoder.layer.7.attention.self/aten::view/Reshape_3" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="677">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="523" name="self.roberta.encoder.layer.7.attention.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="361554108" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.7.attention.output.dense.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="524" name="__module.roberta.encoder.layer.7.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="525" name="Constant_7591" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="363913404" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="526" name="__module.roberta.encoder.layer.7.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="682,input.63">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="527" name="__module.roberta.encoder.layer.7.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="684">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="528" name="__module.roberta.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="529" name="__module.roberta.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="530" name="Constant_7592" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="363916476" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="531" name="__module.roberta.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="532" name="Constant_7593" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="363919548" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="533" name="__module.roberta.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="688,input_tensor.31">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="534" name="self.roberta.encoder.layer.7.intermediate.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="3072, 768" offset="363922620" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.7.intermediate.dense.weight">
<dim>3072</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="535" name="__module.roberta.encoder.layer.7.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>3072</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="536" name="Constant_7594" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 3072" offset="373359804" size="12288" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="537" name="__module.roberta.encoder.layer.7.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="692">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="538" name="__module.roberta.encoder.layer.7.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="693">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="539" name="self.roberta.encoder.layer.7.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 3072" offset="373372092" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.7.output.dense.weight">
<dim>768</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="540" name="__module.roberta.encoder.layer.7.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="541" name="Constant_7595" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="382809276" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="542" name="__module.roberta.encoder.layer.7.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="698,input.65">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="543" name="__module.roberta.encoder.layer.7.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="700">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="544" name="__module.roberta.encoder.layer.7.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="545" name="__module.roberta.encoder.layer.7.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="546" name="Constant_7596" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="382812348" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="547" name="__module.roberta.encoder.layer.7.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="548" name="Constant_7597" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="382815420" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="549" name="__module.roberta.encoder.layer.7.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="704,input_tensor.33">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
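<!-- End of encoder layer 7; encoder layer 8 follows the identical attention / feed-forward pattern. -->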
<layer id="550" name="self.roberta.encoder.layer.8.attention.self.query.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="382818492" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.8.attention.self.query.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="551" name="__module.roberta.encoder.layer.8.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="552" name="Constant_7598" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="385177788" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="553" name="__module.roberta.encoder.layer.8.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="715,x.105">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="554" name="__module.roberta.encoder.layer.8.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="555" name="__module.roberta.encoder.layer.8.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="737,x.107">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="556" name="Constant_1900" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="738">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="557" name="__module.roberta.encoder.layer.8.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="739">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="558" name="self.roberta.encoder.layer.8.attention.self.key.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="385180860" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.8.attention.self.key.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="559" name="__module.roberta.encoder.layer.8.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="560" name="Constant_7599" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="387540156" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="561" name="__module.roberta.encoder.layer.8.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="718,x.97">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="562" name="__module.roberta.encoder.layer.8.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="563" name="__module.roberta.encoder.layer.8.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="722,x.99">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="564" name="Constant_1860" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="723">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="565" name="__module.roberta.encoder.layer.8.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="724,key_layer.17">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="566" name="__module.roberta.encoder.layer.8.attention.self/aten::matmul/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="741,attention_scores.33">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="567" name="Constant_7600" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731280" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="568" name="__module.roberta.encoder.layer.8.attention.self/aten::div/Divide" type="Divide" version="opset1">
<data auto_broadcast="numpy" m_pythondiv="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="742,attention_scores.35">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="569" name="__module.roberta.encoder.layer.8.attention.self/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="743,input.67">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="570" name="__module.roberta.encoder.layer.8.attention.self/aten::softmax/Softmax" type="SoftMax" version="opset8">
<data axis="-1" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="744,input.69">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="571" name="self.roberta.encoder.layer.8.attention.self.value.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="387543228" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.8.attention.self.value.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="572" name="__module.roberta.encoder.layer.8.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="573" name="Constant_7601" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="389902524" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="574" name="__module.roberta.encoder.layer.8.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="727,x.101">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="575" name="__module.roberta.encoder.layer.8.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="576" name="__module.roberta.encoder.layer.8.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="731,x.103">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="577" name="Constant_1883" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="732">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="578" name="__module.roberta.encoder.layer.8.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="733">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="579" name="__module.roberta.encoder.layer.8.attention.self/aten::matmul/MatMul_1" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="746,context_layer.33">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="580" name="Constant_1957" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="747">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="581" name="__module.roberta.encoder.layer.8.attention.self/aten::permute/Transpose_3" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="748">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="582" name="__module.roberta.encoder.layer.8.attention.self/prim::ListConstruct/Concat_3" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="163093668" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="583" name="__module.roberta.encoder.layer.8.attention.self/aten::view/Reshape_3" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="753">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="584" name="self.roberta.encoder.layer.8.attention.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="389905596" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.8.attention.output.dense.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="585" name="__module.roberta.encoder.layer.8.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="586" name="Constant_7602" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="392264892" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="587" name="__module.roberta.encoder.layer.8.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="758,input.71">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="588" name="__module.roberta.encoder.layer.8.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="760">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="589" name="__module.roberta.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="590" name="__module.roberta.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="591" name="Constant_7603" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="392267964" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="592" name="__module.roberta.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="593" name="Constant_7604" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="392271036" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="594" name="__module.roberta.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="764,input_tensor.35">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="595" name="self.roberta.encoder.layer.8.intermediate.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="3072, 768" offset="392274108" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.8.intermediate.dense.weight">
<dim>3072</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="596" name="__module.roberta.encoder.layer.8.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>3072</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="597" name="Constant_7605" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 3072" offset="401711292" size="12288" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="598" name="__module.roberta.encoder.layer.8.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="768">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="599" name="__module.roberta.encoder.layer.8.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="769">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="600" name="self.roberta.encoder.layer.8.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 3072" offset="401723580" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.8.output.dense.weight">
<dim>768</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="601" name="__module.roberta.encoder.layer.8.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="602" name="Constant_7606" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="411160764" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="603" name="__module.roberta.encoder.layer.8.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="774,input.73">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="604" name="__module.roberta.encoder.layer.8.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="776">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="605" name="__module.roberta.encoder.layer.8.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="606" name="__module.roberta.encoder.layer.8.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="607" name="Constant_7607" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="411163836" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="608" name="__module.roberta.encoder.layer.8.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="609" name="Constant_7608" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="411166908" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="610" name="__module.roberta.encoder.layer.8.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="780,input_tensor.37">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
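<!-- End of encoder layer 8; encoder layer 9 begins below. -->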
<layer id="611" name="self.roberta.encoder.layer.9.attention.self.query.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="411169980" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.9.attention.self.query.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="612" name="__module.roberta.encoder.layer.9.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="613" name="Constant_7609" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="413529276" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="614" name="__module.roberta.encoder.layer.9.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="791,x.117">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="615" name="__module.roberta.encoder.layer.9.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="616" name="__module.roberta.encoder.layer.9.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="813,x.119">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
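	<!-- Annotation: the Reshape above and the Transpose below rearrange the query from
	     [batch, seq, 768] to [batch, 12, seq, 64], i.e. one 64-dim slice per attention head. -->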
<layer id="617" name="Constant_2105" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="814">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="618" name="__module.roberta.encoder.layer.9.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="815">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="619" name="self.roberta.encoder.layer.9.attention.self.key.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="413532348" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.9.attention.self.key.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="620" name="__module.roberta.encoder.layer.9.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="621" name="Constant_7610" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="415891644" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="622" name="__module.roberta.encoder.layer.9.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="794,x.109">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="623" name="__module.roberta.encoder.layer.9.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="624" name="__module.roberta.encoder.layer.9.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="798,x.111">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="625" name="Constant_2065" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="799">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="626" name="__module.roberta.encoder.layer.9.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="800,key_layer.19">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="627" name="__module.roberta.encoder.layer.9.attention.self/aten::matmul/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="817,attention_scores.37">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
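	<!-- Annotation: the raw Q x K^T scores are divided by the scalar constant below. Its value
	     lives in the weights blob (offset 160731280, 4 bytes, shared by every encoder layer);
	     for 64-dim heads this is presumably the standard 1/sqrt(head_dim) scaling, i.e. a
	     divisor of 8.0. -->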
<layer id="628" name="Constant_7611" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731280" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="629" name="__module.roberta.encoder.layer.9.attention.self/aten::div/Divide" type="Divide" version="opset1">
<data auto_broadcast="numpy" m_pythondiv="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="818,attention_scores.39">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="630" name="__module.roberta.encoder.layer.9.attention.self/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="819,input.75">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="631" name="__module.roberta.encoder.layer.9.attention.self/aten::softmax/Softmax" type="SoftMax" version="opset8">
<data axis="-1" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="820,input.77">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
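	<!-- Annotation: softmax over the last (key) axis converts the mask-adjusted scores into
	     attention probabilities, which are then matrix-multiplied with the value projections below. -->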
<layer id="632" name="self.roberta.encoder.layer.9.attention.self.value.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="415894716" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.9.attention.self.value.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="633" name="__module.roberta.encoder.layer.9.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="634" name="Constant_7612" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="418254012" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="635" name="__module.roberta.encoder.layer.9.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="803,x.113">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="636" name="__module.roberta.encoder.layer.9.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="637" name="__module.roberta.encoder.layer.9.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="807,x.115">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="638" name="Constant_2088" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="808">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="639" name="__module.roberta.encoder.layer.9.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="809">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="640" name="__module.roberta.encoder.layer.9.attention.self/aten::matmul/MatMul_1" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="822,context_layer.37">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="641" name="Constant_2162" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="823">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="642" name="__module.roberta.encoder.layer.9.attention.self/aten::permute/Transpose_3" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="824">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="643" name="__module.roberta.encoder.layer.9.attention.self/prim::ListConstruct/Concat_3" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="163093668" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="644" name="__module.roberta.encoder.layer.9.attention.self/aten::view/Reshape_3" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="829">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
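	<!-- Annotation: the per-head context [batch, seq, 12, 64] is flattened back to the 768-dim
	     hidden size here, ready for the attention output projection, residual add, and LayerNorm
	     that follow. -->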
<layer id="645" name="self.roberta.encoder.layer.9.attention.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="418257084" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.9.attention.output.dense.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="646" name="__module.roberta.encoder.layer.9.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="647" name="Constant_7613" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="420616380" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="648" name="__module.roberta.encoder.layer.9.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="834,input.79">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="649" name="__module.roberta.encoder.layer.9.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="836">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="650" name="__module.roberta.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="651" name="__module.roberta.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="652" name="Constant_7614" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="420619452" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="653" name="__module.roberta.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="654" name="Constant_7615" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="420622524" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="655" name="__module.roberta.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="840,input_tensor.39">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
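	<!-- Annotation: feed-forward block of layer 9 - a 768 -> 3072 expansion, GELU (ERF
	     approximation), a 3072 -> 768 projection, then the second residual add + LayerNorm
	     (eps ~ 1e-5, the same value used by every LayerNorm in this graph). -->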
<layer id="656" name="self.roberta.encoder.layer.9.intermediate.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="3072, 768" offset="420625596" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.9.intermediate.dense.weight">
<dim>3072</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="657" name="__module.roberta.encoder.layer.9.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>3072</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="658" name="Constant_7616" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 3072" offset="430062780" size="12288" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="659" name="__module.roberta.encoder.layer.9.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="844">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="660" name="__module.roberta.encoder.layer.9.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="845">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="661" name="self.roberta.encoder.layer.9.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 3072" offset="430075068" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.9.output.dense.weight">
<dim>768</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="662" name="__module.roberta.encoder.layer.9.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="663" name="Constant_7617" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="439512252" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="664" name="__module.roberta.encoder.layer.9.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="850,input.81">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="665" name="__module.roberta.encoder.layer.9.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="852">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="666" name="__module.roberta.encoder.layer.9.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="667" name="__module.roberta.encoder.layer.9.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="668" name="Constant_7618" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="439515324" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="669" name="__module.roberta.encoder.layer.9.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="670" name="Constant_7619" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="439518396" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="671" name="__module.roberta.encoder.layer.9.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="856,input_tensor.41">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
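	<!-- Annotation: encoder layer 10 starts below; it is structurally identical to layer 9,
	     with only the weight/bias offsets into the constants blob changing. -->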
<layer id="672" name="self.roberta.encoder.layer.10.attention.self.query.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="439521468" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.10.attention.self.query.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="673" name="__module.roberta.encoder.layer.10.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="674" name="Constant_7620" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="441880764" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="675" name="__module.roberta.encoder.layer.10.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="867,x.129">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="676" name="__module.roberta.encoder.layer.10.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="677" name="__module.roberta.encoder.layer.10.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="889,x.131">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="678" name="Constant_2310" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="890">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="679" name="__module.roberta.encoder.layer.10.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="891">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="680" name="self.roberta.encoder.layer.10.attention.self.key.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="441883836" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.10.attention.self.key.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="681" name="__module.roberta.encoder.layer.10.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="682" name="Constant_7621" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="444243132" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="683" name="__module.roberta.encoder.layer.10.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="870,x.121">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="684" name="__module.roberta.encoder.layer.10.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="685" name="__module.roberta.encoder.layer.10.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="874,x.123">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="686" name="Constant_2270" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="875">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="687" name="__module.roberta.encoder.layer.10.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="876,key_layer.21">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="688" name="__module.roberta.encoder.layer.10.attention.self/aten::matmul/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="893,attention_scores.41">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="689" name="Constant_7622" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731280" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="690" name="__module.roberta.encoder.layer.10.attention.self/aten::div/Divide" type="Divide" version="opset1">
<data auto_broadcast="numpy" m_pythondiv="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="894,attention_scores.43">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="691" name="__module.roberta.encoder.layer.10.attention.self/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="895,input.83">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="692" name="__module.roberta.encoder.layer.10.attention.self/aten::softmax/Softmax" type="SoftMax" version="opset8">
<data axis="-1" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="896,input.85">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="693" name="self.roberta.encoder.layer.10.attention.self.value.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="444246204" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.10.attention.self.value.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="694" name="__module.roberta.encoder.layer.10.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="695" name="Constant_7623" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="446605500" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="696" name="__module.roberta.encoder.layer.10.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="879,x.125">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="697" name="__module.roberta.encoder.layer.10.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="698" name="__module.roberta.encoder.layer.10.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="883,x.127">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="699" name="Constant_2293" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="884">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="700" name="__module.roberta.encoder.layer.10.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="885">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="701" name="__module.roberta.encoder.layer.10.attention.self/aten::matmul/MatMul_1" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="898,context_layer.41">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="702" name="Constant_2367" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="899">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="703" name="__module.roberta.encoder.layer.10.attention.self/aten::permute/Transpose_3" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="900">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="704" name="__module.roberta.encoder.layer.10.attention.self/prim::ListConstruct/Concat_3" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="163093668" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="705" name="__module.roberta.encoder.layer.10.attention.self/aten::view/Reshape_3" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="905">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="706" name="self.roberta.encoder.layer.10.attention.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="446608572" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.10.attention.output.dense.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="707" name="__module.roberta.encoder.layer.10.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="708" name="Constant_7624" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="448967868" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="709" name="__module.roberta.encoder.layer.10.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="910,input.87">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="710" name="__module.roberta.encoder.layer.10.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="912">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="711" name="__module.roberta.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="712" name="__module.roberta.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="713" name="Constant_7625" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="448970940" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="714" name="__module.roberta.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="715" name="Constant_7626" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="448974012" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="716" name="__module.roberta.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="916,input_tensor.43">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="717" name="self.roberta.encoder.layer.10.intermediate.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="3072, 768" offset="448977084" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.10.intermediate.dense.weight">
<dim>3072</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="718" name="__module.roberta.encoder.layer.10.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>3072</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="719" name="Constant_7627" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 3072" offset="458414268" size="12288" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="720" name="__module.roberta.encoder.layer.10.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="920">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="721" name="__module.roberta.encoder.layer.10.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="921">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="722" name="self.roberta.encoder.layer.10.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 3072" offset="458426556" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.10.output.dense.weight">
<dim>768</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="723" name="__module.roberta.encoder.layer.10.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="724" name="Constant_7628" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="467863740" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="725" name="__module.roberta.encoder.layer.10.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="926,input.89">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="726" name="__module.roberta.encoder.layer.10.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="928">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="727" name="__module.roberta.encoder.layer.10.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="728" name="__module.roberta.encoder.layer.10.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="729" name="Constant_7629" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="467866812" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="730" name="__module.roberta.encoder.layer.10.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="731" name="Constant_7630" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="467869884" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="732" name="__module.roberta.encoder.layer.10.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="932,input_tensor.45">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
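	<!-- Annotation: encoder layer 11 starts below - presumably the last of the model's encoder
	     layers, given the zero-based layer numbering. -->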
<layer id="733" name="self.roberta.encoder.layer.11.attention.self.query.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="467872956" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.11.attention.self.query.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="734" name="__module.roberta.encoder.layer.11.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="735" name="Constant_7631" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="470232252" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="736" name="__module.roberta.encoder.layer.11.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="943,x.141">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="737" name="__module.roberta.encoder.layer.11.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="738" name="__module.roberta.encoder.layer.11.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="965,x">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="739" name="Constant_2515" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="966">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="740" name="__module.roberta.encoder.layer.11.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="967">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="741" name="self.roberta.encoder.layer.11.attention.self.key.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="470235324" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.11.attention.self.key.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="742" name="__module.roberta.encoder.layer.11.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="743" name="Constant_7632" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="472594620" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="744" name="__module.roberta.encoder.layer.11.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="946,x.133">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="745" name="__module.roberta.encoder.layer.11.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="746" name="__module.roberta.encoder.layer.11.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="950,x.135">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="747" name="Constant_2475" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="951">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="748" name="__module.roberta.encoder.layer.11.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="952,key_layer">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="749" name="__module.roberta.encoder.layer.11.attention.self/aten::matmul/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="969,attention_scores.45">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="750" name="Constant_7633" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="160731280" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="751" name="__module.roberta.encoder.layer.11.attention.self/aten::div/Divide" type="Divide" version="opset1">
<data auto_broadcast="numpy" m_pythondiv="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="970,attention_scores">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="752" name="__module.roberta.encoder.layer.11.attention.self/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="971,input.91">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="753" name="__module.roberta.encoder.layer.11.attention.self/aten::softmax/Softmax" type="SoftMax" version="opset8">
<data axis="-1" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="972,input.93">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="754" name="self.roberta.encoder.layer.11.attention.self.value.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="472597692" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.11.attention.self.value.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="755" name="__module.roberta.encoder.layer.11.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="756" name="Constant_7634" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="474956988" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="757" name="__module.roberta.encoder.layer.11.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="955,x.137">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="758" name="__module.roberta.encoder.layer.11.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368848" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="759" name="__module.roberta.encoder.layer.11.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="959,x.139">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="760" name="Constant_2498" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="960">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="761" name="__module.roberta.encoder.layer.11.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="961">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="762" name="__module.roberta.encoder.layer.11.attention.self/aten::matmul/MatMul_1" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="974,context_layer.45">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
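<!-- context_layer: matrix product of the attention probabilities and V, shape [batch, 12, seq, 64]. -->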
<layer id="763" name="Constant_2572" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="158368880" size="32" />
<output>
<port id="0" precision="I64" names="975">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="764" name="__module.roberta.encoder.layer.11.attention.self/aten::permute/Transpose_3" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>12</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="976">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="765" name="__module.roberta.encoder.layer.11.attention.self/prim::ListConstruct/Concat_3" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="163093668" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="766" name="__module.roberta.encoder.layer.11.attention.self/aten::view/Reshape_3" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>12</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="981">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
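<!-- The heads are permuted back to [batch, seq, 12, 64] and flattened into the hidden size 768 (12 x 64) before the attention output projection. -->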
<layer id="767" name="self.roberta.encoder.layer.11.attention.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="474960060" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.11.attention.output.dense.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="768" name="__module.roberta.encoder.layer.11.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="769" name="Constant_7635" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="477319356" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="770" name="__module.roberta.encoder.layer.11.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="986,input.95">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="771" name="__module.roberta.encoder.layer.11.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="988">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
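<!-- Residual connection around the attention block. The LayerNorm that follows is decomposed into MVN (normalize over the last axis, eps approximately 1e-5), a per-channel Multiply (gamma), and an Add (beta). -->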
<layer id="772" name="__module.roberta.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="773" name="__module.roberta.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="774" name="Constant_7636" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="477322428" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="775" name="__module.roberta.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="776" name="Constant_7637" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="477325500" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="777" name="__module.roberta.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="992,input_tensor">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
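<!-- Feed-forward block of encoder layer 11: dense 768 to 3072 plus bias, GELU, dense 3072 to 768 plus bias, then residual add and a second decomposed LayerNorm. -->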
<layer id="778" name="self.roberta.encoder.layer.11.intermediate.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="3072, 768" offset="477328572" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.11.intermediate.dense.weight">
<dim>3072</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="779" name="__module.roberta.encoder.layer.11.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>3072</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="780" name="Constant_7638" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 3072" offset="486765756" size="12288" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="781" name="__module.roberta.encoder.layer.11.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="996">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="782" name="__module.roberta.encoder.layer.11.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="997">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
</output>
</layer>
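<!-- approximation_mode="ERF" selects the exact GELU: gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))). -->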
<layer id="783" name="self.roberta.encoder.layer.11.output.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 3072" offset="486778044" size="9437184" />
<output>
<port id="0" precision="FP32" names="self.roberta.encoder.layer.11.output.dense.weight">
<dim>768</dim>
<dim>3072</dim>
</port>
</output>
</layer>
<layer id="784" name="__module.roberta.encoder.layer.11.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>3072</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>3072</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="785" name="Constant_7639" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="496215228" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="786" name="__module.roberta.encoder.layer.11.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1002,input.97">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="787" name="__module.roberta.encoder.layer.11.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1004">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="788" name="__module.roberta.encoder.layer.11.output.LayerNorm/aten::layer_norm/Constant" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156000332" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="789" name="__module.roberta.encoder.layer.11.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="790" name="Constant_7640" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="496218300" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="791" name="__module.roberta.encoder.layer.11.output.LayerNorm/aten::layer_norm/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="792" name="Constant_7641" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 768" offset="496221372" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="793" name="__module.roberta.encoder.layer.11.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1008,1017,features">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
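<!-- End of encoder layer 11. "features" ([batch, seq, 768]) feeds the classification head: select the first token, dense 768 to 768 with tanh, then out_proj 768 to 2. -->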
<layer id="794" name="1013" type="Const" version="opset1">
<data element_type="i64" shape="" offset="154421308" size="8" />
<output>
<port id="0" precision="I64" names="1013" />
</output>
</layer>
<layer id="795" name="1011" type="Const" version="opset1">
<data element_type="i64" shape="" offset="154421284" size="8" />
<output>
<port id="0" precision="I64" names="1011" />
</output>
</layer>
<layer id="796" name="__module.classifier/aten::select/Gather" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="I64" />
<port id="2" precision="I64" />
</input>
<output>
<port id="3" precision="FP32" names="1018,1019,input.99">
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
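<!-- The Gather selects a single position along the sequence axis, effectively features[:, 0, :] (the BOS token), yielding [batch, 768]; the scalar constants "1011" and "1013" supply the index and axis. -->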
<layer id="797" name="self.classifier.dense.weight" type="Const" version="opset1">
<data element_type="f32" shape="768, 768" offset="496224444" size="2359296" />
<output>
<port id="0" precision="FP32" names="self.classifier.dense.weight">
<dim>768</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="798" name="__module.classifier.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>768</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="799" name="Constant_7642" type="Const" version="opset1">
<data element_type="f32" shape="1, 768" offset="498583740" size="3072" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="800" name="__module.classifier.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1023">
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="801" name="__module.classifier/aten::tanh/Tanh" type="Tanh" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1024,input">
<dim>-1</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="802" name="self.classifier.out_proj.weight" type="Const" version="opset1">
<data element_type="f32" shape="2, 768" offset="498586812" size="6144" />
<output>
<port id="0" precision="FP32" names="self.classifier.out_proj.weight">
<dim>2</dim>
<dim>768</dim>
</port>
</output>
</layer>
<layer id="803" name="__module.classifier.out_proj/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>768</dim>
</port>
<port id="1" precision="FP32">
<dim>2</dim>
<dim>768</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>2</dim>
</port>
</output>
</layer>
<layer id="804" name="Constant_7643" type="Const" version="opset1">
<data element_type="f32" shape="1, 2" offset="498592956" size="8" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
</port>
</output>
</layer>
<layer id="805" name="__module.classifier.out_proj/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>2</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="logits">
<dim>-1</dim>
<dim>2</dim>
</port>
</output>
</layer>
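<!-- Final classifier logits, shape [batch, 2]. -->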
<layer id="806" name="Result_3736" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>2</dim>
</port>
</input>
</layer>
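<!-- Usage sketch (an assumption, not part of the IR): with this file saved as model.xml next to its model.bin weights, inference through the OpenVINO Python API would look roughly like:

    import numpy as np
    import openvino as ov

    core = ov.Core()
    model = core.read_model("model.xml")        # hypothetical path
    compiled = core.compile_model(model, "CPU")

    ids = np.array([[0, 713, 16, 372, 2]], dtype=np.int64)  # placeholder token ids
    mask = np.ones_like(ids)                    # int64 attention mask
    result = compiled({"input_ids": ids, "attention_mask": mask})
    logits = result["logits"]                   # shape [batch, 2]

Tokenization must match the original RoBERTa checkpoint; the ids above are placeholders only. -->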
</layers>
<edges>
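<!-- Each edge connects an output port of one layer to an input port of another: from-layer/from-port identify the producer, to-layer/to-port the consumer. -->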
<edge from-layer="0" from-port="0" to-layer="72" to-port="0" />
<edge from-layer="1" from-port="0" to-layer="3" to-port="0" />
<edge from-layer="1" from-port="0" to-layer="35" to-port="0" />
<edge from-layer="1" from-port="0" to-layer="11" to-port="0" />
<edge from-layer="2" from-port="0" to-layer="5" to-port="0" />
<edge from-layer="3" from-port="1" to-layer="5" to-port="1" />
<edge from-layer="4" from-port="0" to-layer="5" to-port="2" />
<edge from-layer="5" from-port="3" to-layer="32" to-port="0" />
<edge from-layer="6" from-port="0" to-layer="31" to-port="0" />
<edge from-layer="7" from-port="0" to-layer="20" to-port="0" />
<edge from-layer="8" from-port="0" to-layer="20" to-port="1" />
<edge from-layer="9" from-port="0" to-layer="18" to-port="0" />
<edge from-layer="10" from-port="0" to-layer="18" to-port="1" />
<edge from-layer="11" from-port="1" to-layer="14" to-port="0" />
<edge from-layer="11" from-port="1" to-layer="23" to-port="0" />
<edge from-layer="12" from-port="0" to-layer="14" to-port="1" />
<edge from-layer="12" from-port="0" to-layer="72" to-port="1" />
<edge from-layer="12" from-port="0" to-layer="37" to-port="1" />
<edge from-layer="13" from-port="0" to-layer="14" to-port="2" />
<edge from-layer="14" from-port="3" to-layer="16" to-port="0" />
<edge from-layer="14" from-port="3" to-layer="25" to-port="0" />
<edge from-layer="15" from-port="0" to-layer="16" to-port="1" />
<edge from-layer="16" from-port="2" to-layer="18" to-port="2" />
<edge from-layer="17" from-port="0" to-layer="18" to-port="3" />
<edge from-layer="18" from-port="4" to-layer="20" to-port="2" />
<edge from-layer="19" from-port="0" to-layer="20" to-port="3" />
<edge from-layer="20" from-port="4" to-layer="28" to-port="0" />
<edge from-layer="21" from-port="0" to-layer="23" to-port="1" />
<edge from-layer="22" from-port="0" to-layer="23" to-port="2" />
<edge from-layer="23" from-port="3" to-layer="26" to-port="0" />
<edge from-layer="24" from-port="0" to-layer="25" to-port="1" />
<edge from-layer="25" from-port="2" to-layer="26" to-port="1" />
<edge from-layer="26" from-port="2" to-layer="27" to-port="0" />
<edge from-layer="27" from-port="1" to-layer="28" to-port="1" />
<edge from-layer="28" from-port="2" to-layer="29" to-port="0" />
<edge from-layer="29" from-port="1" to-layer="31" to-port="1" />
<edge from-layer="30" from-port="0" to-layer="31" to-port="2" />
<edge from-layer="31" from-port="3" to-layer="32" to-port="1" />
<edge from-layer="32" from-port="2" to-layer="45" to-port="0" />
<edge from-layer="33" from-port="0" to-layer="44" to-port="0" />
<edge from-layer="34" from-port="0" to-layer="35" to-port="1" />
<edge from-layer="35" from-port="2" to-layer="36" to-port="0" />
<edge from-layer="36" from-port="1" to-layer="38" to-port="1" />
<edge from-layer="36" from-port="1" to-layer="37" to-port="0" />
<edge from-layer="37" from-port="2" to-layer="38" to-port="0" />
<edge from-layer="38" from-port="2" to-layer="39" to-port="0" />
<edge from-layer="39" from-port="1" to-layer="41" to-port="0" />
<edge from-layer="40" from-port="0" to-layer="41" to-port="1" />
<edge from-layer="41" from-port="2" to-layer="42" to-port="0" />
<edge from-layer="42" from-port="1" to-layer="44" to-port="1" />
<edge from-layer="43" from-port="0" to-layer="44" to-port="2" />
<edge from-layer="44" from-port="3" to-layer="45" to-port="1" />
<edge from-layer="45" from-port="2" to-layer="47" to-port="0" />
<edge from-layer="46" from-port="0" to-layer="47" to-port="1" />
<edge from-layer="47" from-port="2" to-layer="49" to-port="0" />
<edge from-layer="48" from-port="0" to-layer="49" to-port="1" />
<edge from-layer="49" from-port="2" to-layer="51" to-port="0" />
<edge from-layer="50" from-port="0" to-layer="51" to-port="1" />
<edge from-layer="51" from-port="2" to-layer="100" to-port="1" />
<edge from-layer="51" from-port="2" to-layer="84" to-port="0" />
<edge from-layer="51" from-port="2" to-layer="53" to-port="0" />
<edge from-layer="51" from-port="2" to-layer="61" to-port="0" />
<edge from-layer="52" from-port="0" to-layer="53" to-port="1" />
<edge from-layer="53" from-port="2" to-layer="55" to-port="0" />
<edge from-layer="54" from-port="0" to-layer="55" to-port="1" />
<edge from-layer="55" from-port="2" to-layer="57" to-port="0" />
<edge from-layer="56" from-port="0" to-layer="57" to-port="1" />
<edge from-layer="57" from-port="2" to-layer="59" to-port="0" />
<edge from-layer="58" from-port="0" to-layer="59" to-port="1" />
<edge from-layer="59" from-port="2" to-layer="68" to-port="0" />
<edge from-layer="60" from-port="0" to-layer="61" to-port="1" />
<edge from-layer="61" from-port="2" to-layer="63" to-port="0" />
<edge from-layer="62" from-port="0" to-layer="63" to-port="1" />
<edge from-layer="63" from-port="2" to-layer="65" to-port="0" />
<edge from-layer="64" from-port="0" to-layer="65" to-port="1" />
<edge from-layer="65" from-port="2" to-layer="67" to-port="0" />
<edge from-layer="66" from-port="0" to-layer="67" to-port="1" />
<edge from-layer="67" from-port="2" to-layer="68" to-port="1" />
<edge from-layer="68" from-port="2" to-layer="70" to-port="0" />
<edge from-layer="69" from-port="0" to-layer="70" to-port="1" />
<edge from-layer="70" from-port="2" to-layer="81" to-port="0" />
<edge from-layer="71" from-port="0" to-layer="78" to-port="0" />
<edge from-layer="72" from-port="2" to-layer="74" to-port="0" />
<edge from-layer="73" from-port="0" to-layer="74" to-port="1" />
<edge from-layer="74" from-port="2" to-layer="75" to-port="0" />
<edge from-layer="75" from-port="1" to-layer="77" to-port="0" />
<edge from-layer="76" from-port="0" to-layer="77" to-port="1" />
<edge from-layer="77" from-port="2" to-layer="78" to-port="1" />
<edge from-layer="78" from-port="2" to-layer="80" to-port="0" />
<edge from-layer="79" from-port="0" to-layer="80" to-port="1" />
<edge from-layer="80" from-port="2" to-layer="508" to-port="1" />
<edge from-layer="80" from-port="2" to-layer="569" to-port="1" />
<edge from-layer="80" from-port="2" to-layer="630" to-port="1" />
<edge from-layer="80" from-port="2" to-layer="691" to-port="1" />
<edge from-layer="80" from-port="2" to-layer="752" to-port="1" />
<edge from-layer="80" from-port="2" to-layer="447" to-port="1" />
<edge from-layer="80" from-port="2" to-layer="386" to-port="1" />
<edge from-layer="80" from-port="2" to-layer="325" to-port="1" />
<edge from-layer="80" from-port="2" to-layer="264" to-port="1" />
<edge from-layer="80" from-port="2" to-layer="203" to-port="1" />
<edge from-layer="80" from-port="2" to-layer="142" to-port="1" />
<edge from-layer="80" from-port="2" to-layer="81" to-port="1" />
<edge from-layer="81" from-port="2" to-layer="82" to-port="0" />
<edge from-layer="82" from-port="1" to-layer="91" to-port="0" />
<edge from-layer="83" from-port="0" to-layer="84" to-port="1" />
<edge from-layer="84" from-port="2" to-layer="86" to-port="0" />
<edge from-layer="85" from-port="0" to-layer="86" to-port="1" />
<edge from-layer="86" from-port="2" to-layer="88" to-port="0" />
<edge from-layer="87" from-port="0" to-layer="88" to-port="1" />
<edge from-layer="88" from-port="2" to-layer="90" to-port="0" />
<edge from-layer="89" from-port="0" to-layer="90" to-port="1" />
<edge from-layer="90" from-port="2" to-layer="91" to-port="1" />
<edge from-layer="91" from-port="2" to-layer="93" to-port="0" />
<edge from-layer="92" from-port="0" to-layer="93" to-port="1" />
<edge from-layer="93" from-port="2" to-layer="95" to-port="0" />
<edge from-layer="94" from-port="0" to-layer="95" to-port="1" />
<edge from-layer="95" from-port="2" to-layer="97" to-port="0" />
<edge from-layer="96" from-port="0" to-layer="97" to-port="1" />
<edge from-layer="97" from-port="2" to-layer="99" to-port="0" />
<edge from-layer="98" from-port="0" to-layer="99" to-port="1" />
<edge from-layer="99" from-port="2" to-layer="100" to-port="0" />
<edge from-layer="100" from-port="2" to-layer="102" to-port="0" />
<edge from-layer="101" from-port="0" to-layer="102" to-port="1" />
<edge from-layer="102" from-port="2" to-layer="104" to-port="0" />
<edge from-layer="103" from-port="0" to-layer="104" to-port="1" />
<edge from-layer="104" from-port="2" to-layer="106" to-port="0" />
<edge from-layer="105" from-port="0" to-layer="106" to-port="1" />
<edge from-layer="106" from-port="2" to-layer="108" to-port="0" />
<edge from-layer="106" from-port="2" to-layer="116" to-port="1" />
<edge from-layer="107" from-port="0" to-layer="108" to-port="1" />
<edge from-layer="108" from-port="2" to-layer="110" to-port="0" />
<edge from-layer="109" from-port="0" to-layer="110" to-port="1" />
<edge from-layer="110" from-port="2" to-layer="111" to-port="0" />
<edge from-layer="111" from-port="1" to-layer="113" to-port="0" />
<edge from-layer="112" from-port="0" to-layer="113" to-port="1" />
<edge from-layer="113" from-port="2" to-layer="115" to-port="0" />
<edge from-layer="114" from-port="0" to-layer="115" to-port="1" />
<edge from-layer="115" from-port="2" to-layer="116" to-port="0" />
<edge from-layer="116" from-port="2" to-layer="118" to-port="0" />
<edge from-layer="117" from-port="0" to-layer="118" to-port="1" />
<edge from-layer="118" from-port="2" to-layer="120" to-port="0" />
<edge from-layer="119" from-port="0" to-layer="120" to-port="1" />
<edge from-layer="120" from-port="2" to-layer="122" to-port="0" />
<edge from-layer="121" from-port="0" to-layer="122" to-port="1" />
<edge from-layer="122" from-port="2" to-layer="161" to-port="1" />
<edge from-layer="122" from-port="2" to-layer="124" to-port="0" />
<edge from-layer="122" from-port="2" to-layer="132" to-port="0" />
<edge from-layer="122" from-port="2" to-layer="145" to-port="0" />
<edge from-layer="123" from-port="0" to-layer="124" to-port="1" />
<edge from-layer="124" from-port="2" to-layer="126" to-port="0" />
<edge from-layer="125" from-port="0" to-layer="126" to-port="1" />
<edge from-layer="126" from-port="2" to-layer="128" to-port="0" />
<edge from-layer="127" from-port="0" to-layer="128" to-port="1" />
<edge from-layer="128" from-port="2" to-layer="130" to-port="0" />
<edge from-layer="129" from-port="0" to-layer="130" to-port="1" />
<edge from-layer="130" from-port="2" to-layer="139" to-port="0" />
<edge from-layer="131" from-port="0" to-layer="132" to-port="1" />
<edge from-layer="132" from-port="2" to-layer="134" to-port="0" />
<edge from-layer="133" from-port="0" to-layer="134" to-port="1" />
<edge from-layer="134" from-port="2" to-layer="136" to-port="0" />
<edge from-layer="135" from-port="0" to-layer="136" to-port="1" />
<edge from-layer="136" from-port="2" to-layer="138" to-port="0" />
<edge from-layer="137" from-port="0" to-layer="138" to-port="1" />
<edge from-layer="138" from-port="2" to-layer="139" to-port="1" />
<edge from-layer="139" from-port="2" to-layer="141" to-port="0" />
<edge from-layer="140" from-port="0" to-layer="141" to-port="1" />
<edge from-layer="141" from-port="2" to-layer="142" to-port="0" />
<edge from-layer="142" from-port="2" to-layer="143" to-port="0" />
<edge from-layer="143" from-port="1" to-layer="152" to-port="0" />
<edge from-layer="144" from-port="0" to-layer="145" to-port="1" />
<edge from-layer="145" from-port="2" to-layer="147" to-port="0" />
<edge from-layer="146" from-port="0" to-layer="147" to-port="1" />
<edge from-layer="147" from-port="2" to-layer="149" to-port="0" />
<edge from-layer="148" from-port="0" to-layer="149" to-port="1" />
<edge from-layer="149" from-port="2" to-layer="151" to-port="0" />
<edge from-layer="150" from-port="0" to-layer="151" to-port="1" />
<edge from-layer="151" from-port="2" to-layer="152" to-port="1" />
<edge from-layer="152" from-port="2" to-layer="154" to-port="0" />
<edge from-layer="153" from-port="0" to-layer="154" to-port="1" />
<edge from-layer="154" from-port="2" to-layer="156" to-port="0" />
<edge from-layer="155" from-port="0" to-layer="156" to-port="1" />
<edge from-layer="156" from-port="2" to-layer="158" to-port="0" />
<edge from-layer="157" from-port="0" to-layer="158" to-port="1" />
<edge from-layer="158" from-port="2" to-layer="160" to-port="0" />
<edge from-layer="159" from-port="0" to-layer="160" to-port="1" />
<edge from-layer="160" from-port="2" to-layer="161" to-port="0" />
<edge from-layer="161" from-port="2" to-layer="163" to-port="0" />
<edge from-layer="162" from-port="0" to-layer="163" to-port="1" />
<edge from-layer="163" from-port="2" to-layer="165" to-port="0" />
<edge from-layer="164" from-port="0" to-layer="165" to-port="1" />
<edge from-layer="165" from-port="2" to-layer="167" to-port="0" />
<edge from-layer="166" from-port="0" to-layer="167" to-port="1" />
<edge from-layer="167" from-port="2" to-layer="177" to-port="1" />
<edge from-layer="167" from-port="2" to-layer="169" to-port="0" />
<edge from-layer="168" from-port="0" to-layer="169" to-port="1" />
<edge from-layer="169" from-port="2" to-layer="171" to-port="0" />
<edge from-layer="170" from-port="0" to-layer="171" to-port="1" />
<edge from-layer="171" from-port="2" to-layer="172" to-port="0" />
<edge from-layer="172" from-port="1" to-layer="174" to-port="0" />
<edge from-layer="173" from-port="0" to-layer="174" to-port="1" />
<edge from-layer="174" from-port="2" to-layer="176" to-port="0" />
<edge from-layer="175" from-port="0" to-layer="176" to-port="1" />
<edge from-layer="176" from-port="2" to-layer="177" to-port="0" />
<edge from-layer="177" from-port="2" to-layer="179" to-port="0" />
<edge from-layer="178" from-port="0" to-layer="179" to-port="1" />
<edge from-layer="179" from-port="2" to-layer="181" to-port="0" />
<edge from-layer="180" from-port="0" to-layer="181" to-port="1" />
<edge from-layer="181" from-port="2" to-layer="183" to-port="0" />
<edge from-layer="182" from-port="0" to-layer="183" to-port="1" />
<edge from-layer="183" from-port="2" to-layer="193" to-port="0" />
<edge from-layer="183" from-port="2" to-layer="185" to-port="0" />
<edge from-layer="183" from-port="2" to-layer="222" to-port="1" />
<edge from-layer="183" from-port="2" to-layer="206" to-port="0" />
<edge from-layer="184" from-port="0" to-layer="185" to-port="1" />
<edge from-layer="185" from-port="2" to-layer="187" to-port="0" />
<edge from-layer="186" from-port="0" to-layer="187" to-port="1" />
<edge from-layer="187" from-port="2" to-layer="189" to-port="0" />
<edge from-layer="188" from-port="0" to-layer="189" to-port="1" />
<edge from-layer="189" from-port="2" to-layer="191" to-port="0" />
<edge from-layer="190" from-port="0" to-layer="191" to-port="1" />
<edge from-layer="191" from-port="2" to-layer="200" to-port="0" />
<edge from-layer="192" from-port="0" to-layer="193" to-port="1" />
<edge from-layer="193" from-port="2" to-layer="195" to-port="0" />
<edge from-layer="194" from-port="0" to-layer="195" to-port="1" />
<edge from-layer="195" from-port="2" to-layer="197" to-port="0" />
<edge from-layer="196" from-port="0" to-layer="197" to-port="1" />
<edge from-layer="197" from-port="2" to-layer="199" to-port="0" />
<edge from-layer="198" from-port="0" to-layer="199" to-port="1" />
<edge from-layer="199" from-port="2" to-layer="200" to-port="1" />
<edge from-layer="200" from-port="2" to-layer="202" to-port="0" />
<edge from-layer="201" from-port="0" to-layer="202" to-port="1" />
<edge from-layer="202" from-port="2" to-layer="203" to-port="0" />
<edge from-layer="203" from-port="2" to-layer="204" to-port="0" />
<edge from-layer="204" from-port="1" to-layer="213" to-port="0" />
<edge from-layer="205" from-port="0" to-layer="206" to-port="1" />
<edge from-layer="206" from-port="2" to-layer="208" to-port="0" />
<edge from-layer="207" from-port="0" to-layer="208" to-port="1" />
<edge from-layer="208" from-port="2" to-layer="210" to-port="0" />
<edge from-layer="209" from-port="0" to-layer="210" to-port="1" />
<edge from-layer="210" from-port="2" to-layer="212" to-port="0" />
<edge from-layer="211" from-port="0" to-layer="212" to-port="1" />
<edge from-layer="212" from-port="2" to-layer="213" to-port="1" />
<edge from-layer="213" from-port="2" to-layer="215" to-port="0" />
<edge from-layer="214" from-port="0" to-layer="215" to-port="1" />
<edge from-layer="215" from-port="2" to-layer="217" to-port="0" />
<edge from-layer="216" from-port="0" to-layer="217" to-port="1" />
<edge from-layer="217" from-port="2" to-layer="219" to-port="0" />
<edge from-layer="218" from-port="0" to-layer="219" to-port="1" />
<edge from-layer="219" from-port="2" to-layer="221" to-port="0" />
<edge from-layer="220" from-port="0" to-layer="221" to-port="1" />
<edge from-layer="221" from-port="2" to-layer="222" to-port="0" />
<edge from-layer="222" from-port="2" to-layer="224" to-port="0" />
<edge from-layer="223" from-port="0" to-layer="224" to-port="1" />
<edge from-layer="224" from-port="2" to-layer="226" to-port="0" />
<edge from-layer="225" from-port="0" to-layer="226" to-port="1" />
<edge from-layer="226" from-port="2" to-layer="228" to-port="0" />
<edge from-layer="227" from-port="0" to-layer="228" to-port="1" />
<edge from-layer="228" from-port="2" to-layer="230" to-port="0" />
<edge from-layer="228" from-port="2" to-layer="238" to-port="1" />
<edge from-layer="229" from-port="0" to-layer="230" to-port="1" />
<edge from-layer="230" from-port="2" to-layer="232" to-port="0" />
<edge from-layer="231" from-port="0" to-layer="232" to-port="1" />
<edge from-layer="232" from-port="2" to-layer="233" to-port="0" />
<edge from-layer="233" from-port="1" to-layer="235" to-port="0" />
<edge from-layer="234" from-port="0" to-layer="235" to-port="1" />
<edge from-layer="235" from-port="2" to-layer="237" to-port="0" />
<edge from-layer="236" from-port="0" to-layer="237" to-port="1" />
<edge from-layer="237" from-port="2" to-layer="238" to-port="0" />
<edge from-layer="238" from-port="2" to-layer="240" to-port="0" />
<edge from-layer="239" from-port="0" to-layer="240" to-port="1" />
<edge from-layer="240" from-port="2" to-layer="242" to-port="0" />
<edge from-layer="241" from-port="0" to-layer="242" to-port="1" />
<edge from-layer="242" from-port="2" to-layer="244" to-port="0" />
<edge from-layer="243" from-port="0" to-layer="244" to-port="1" />
<edge from-layer="244" from-port="2" to-layer="254" to-port="0" />
<edge from-layer="244" from-port="2" to-layer="246" to-port="0" />
<edge from-layer="244" from-port="2" to-layer="283" to-port="1" />
<edge from-layer="244" from-port="2" to-layer="267" to-port="0" />
<edge from-layer="245" from-port="0" to-layer="246" to-port="1" />
<edge from-layer="246" from-port="2" to-layer="248" to-port="0" />
<edge from-layer="247" from-port="0" to-layer="248" to-port="1" />
<edge from-layer="248" from-port="2" to-layer="250" to-port="0" />
<edge from-layer="249" from-port="0" to-layer="250" to-port="1" />
<edge from-layer="250" from-port="2" to-layer="252" to-port="0" />
<edge from-layer="251" from-port="0" to-layer="252" to-port="1" />
<edge from-layer="252" from-port="2" to-layer="261" to-port="0" />
<edge from-layer="253" from-port="0" to-layer="254" to-port="1" />
<edge from-layer="254" from-port="2" to-layer="256" to-port="0" />
<edge from-layer="255" from-port="0" to-layer="256" to-port="1" />
<edge from-layer="256" from-port="2" to-layer="258" to-port="0" />
<edge from-layer="257" from-port="0" to-layer="258" to-port="1" />
<edge from-layer="258" from-port="2" to-layer="260" to-port="0" />
<edge from-layer="259" from-port="0" to-layer="260" to-port="1" />
<edge from-layer="260" from-port="2" to-layer="261" to-port="1" />
<edge from-layer="261" from-port="2" to-layer="263" to-port="0" />
<edge from-layer="262" from-port="0" to-layer="263" to-port="1" />
<edge from-layer="263" from-port="2" to-layer="264" to-port="0" />
<edge from-layer="264" from-port="2" to-layer="265" to-port="0" />
<edge from-layer="265" from-port="1" to-layer="274" to-port="0" />
<edge from-layer="266" from-port="0" to-layer="267" to-port="1" />
<edge from-layer="267" from-port="2" to-layer="269" to-port="0" />
<edge from-layer="268" from-port="0" to-layer="269" to-port="1" />
<edge from-layer="269" from-port="2" to-layer="271" to-port="0" />
<edge from-layer="270" from-port="0" to-layer="271" to-port="1" />
<edge from-layer="271" from-port="2" to-layer="273" to-port="0" />
<edge from-layer="272" from-port="0" to-layer="273" to-port="1" />
<edge from-layer="273" from-port="2" to-layer="274" to-port="1" />
<edge from-layer="274" from-port="2" to-layer="276" to-port="0" />
<edge from-layer="275" from-port="0" to-layer="276" to-port="1" />
<edge from-layer="276" from-port="2" to-layer="278" to-port="0" />
<edge from-layer="277" from-port="0" to-layer="278" to-port="1" />
<edge from-layer="278" from-port="2" to-layer="280" to-port="0" />
<edge from-layer="279" from-port="0" to-layer="280" to-port="1" />
<edge from-layer="280" from-port="2" to-layer="282" to-port="0" />
<edge from-layer="281" from-port="0" to-layer="282" to-port="1" />
<edge from-layer="282" from-port="2" to-layer="283" to-port="0" />
<edge from-layer="283" from-port="2" to-layer="285" to-port="0" />
<edge from-layer="284" from-port="0" to-layer="285" to-port="1" />
<edge from-layer="285" from-port="2" to-layer="287" to-port="0" />
<edge from-layer="286" from-port="0" to-layer="287" to-port="1" />
<edge from-layer="287" from-port="2" to-layer="289" to-port="0" />
<edge from-layer="288" from-port="0" to-layer="289" to-port="1" />
<edge from-layer="289" from-port="2" to-layer="291" to-port="0" />
<edge from-layer="289" from-port="2" to-layer="299" to-port="1" />
<edge from-layer="290" from-port="0" to-layer="291" to-port="1" />
<edge from-layer="291" from-port="2" to-layer="293" to-port="0" />
<edge from-layer="292" from-port="0" to-layer="293" to-port="1" />
<edge from-layer="293" from-port="2" to-layer="294" to-port="0" />
<edge from-layer="294" from-port="1" to-layer="296" to-port="0" />
<edge from-layer="295" from-port="0" to-layer="296" to-port="1" />
<edge from-layer="296" from-port="2" to-layer="298" to-port="0" />
<edge from-layer="297" from-port="0" to-layer="298" to-port="1" />
<edge from-layer="298" from-port="2" to-layer="299" to-port="0" />
<edge from-layer="299" from-port="2" to-layer="301" to-port="0" />
<edge from-layer="300" from-port="0" to-layer="301" to-port="1" />
<edge from-layer="301" from-port="2" to-layer="303" to-port="0" />
<edge from-layer="302" from-port="0" to-layer="303" to-port="1" />
<edge from-layer="303" from-port="2" to-layer="305" to-port="0" />
<edge from-layer="304" from-port="0" to-layer="305" to-port="1" />
<edge from-layer="305" from-port="2" to-layer="315" to-port="0" />
<edge from-layer="305" from-port="2" to-layer="344" to-port="1" />
<edge from-layer="305" from-port="2" to-layer="307" to-port="0" />
<edge from-layer="305" from-port="2" to-layer="328" to-port="0" />
<edge from-layer="306" from-port="0" to-layer="307" to-port="1" />
<edge from-layer="307" from-port="2" to-layer="309" to-port="0" />
<edge from-layer="308" from-port="0" to-layer="309" to-port="1" />
<edge from-layer="309" from-port="2" to-layer="311" to-port="0" />
<edge from-layer="310" from-port="0" to-layer="311" to-port="1" />
<edge from-layer="311" from-port="2" to-layer="313" to-port="0" />
<edge from-layer="312" from-port="0" to-layer="313" to-port="1" />
<edge from-layer="313" from-port="2" to-layer="322" to-port="0" />
<edge from-layer="314" from-port="0" to-layer="315" to-port="1" />
<edge from-layer="315" from-port="2" to-layer="317" to-port="0" />
<edge from-layer="316" from-port="0" to-layer="317" to-port="1" />
<edge from-layer="317" from-port="2" to-layer="319" to-port="0" />
<edge from-layer="318" from-port="0" to-layer="319" to-port="1" />
<edge from-layer="319" from-port="2" to-layer="321" to-port="0" />
<edge from-layer="320" from-port="0" to-layer="321" to-port="1" />
<edge from-layer="321" from-port="2" to-layer="322" to-port="1" />
<edge from-layer="322" from-port="2" to-layer="324" to-port="0" />
<edge from-layer="323" from-port="0" to-layer="324" to-port="1" />
<edge from-layer="324" from-port="2" to-layer="325" to-port="0" />
<edge from-layer="325" from-port="2" to-layer="326" to-port="0" />
<edge from-layer="326" from-port="1" to-layer="335" to-port="0" />
<edge from-layer="327" from-port="0" to-layer="328" to-port="1" />
<edge from-layer="328" from-port="2" to-layer="330" to-port="0" />
<edge from-layer="329" from-port="0" to-layer="330" to-port="1" />
<edge from-layer="330" from-port="2" to-layer="332" to-port="0" />
<edge from-layer="331" from-port="0" to-layer="332" to-port="1" />
<edge from-layer="332" from-port="2" to-layer="334" to-port="0" />
<edge from-layer="333" from-port="0" to-layer="334" to-port="1" />
<edge from-layer="334" from-port="2" to-layer="335" to-port="1" />
<edge from-layer="335" from-port="2" to-layer="337" to-port="0" />
<edge from-layer="336" from-port="0" to-layer="337" to-port="1" />
<edge from-layer="337" from-port="2" to-layer="339" to-port="0" />
<edge from-layer="338" from-port="0" to-layer="339" to-port="1" />
<edge from-layer="339" from-port="2" to-layer="341" to-port="0" />
<edge from-layer="340" from-port="0" to-layer="341" to-port="1" />
<edge from-layer="341" from-port="2" to-layer="343" to-port="0" />
<edge from-layer="342" from-port="0" to-layer="343" to-port="1" />
<edge from-layer="343" from-port="2" to-layer="344" to-port="0" />
<edge from-layer="344" from-port="2" to-layer="346" to-port="0" />
<edge from-layer="345" from-port="0" to-layer="346" to-port="1" />
<edge from-layer="346" from-port="2" to-layer="348" to-port="0" />
<edge from-layer="347" from-port="0" to-layer="348" to-port="1" />
<edge from-layer="348" from-port="2" to-layer="350" to-port="0" />
<edge from-layer="349" from-port="0" to-layer="350" to-port="1" />
<edge from-layer="350" from-port="2" to-layer="352" to-port="0" />
<edge from-layer="350" from-port="2" to-layer="360" to-port="1" />
<edge from-layer="351" from-port="0" to-layer="352" to-port="1" />
<edge from-layer="352" from-port="2" to-layer="354" to-port="0" />
<edge from-layer="353" from-port="0" to-layer="354" to-port="1" />
<edge from-layer="354" from-port="2" to-layer="355" to-port="0" />
<edge from-layer="355" from-port="1" to-layer="357" to-port="0" />
<edge from-layer="356" from-port="0" to-layer="357" to-port="1" />
<edge from-layer="357" from-port="2" to-layer="359" to-port="0" />
<edge from-layer="358" from-port="0" to-layer="359" to-port="1" />
<edge from-layer="359" from-port="2" to-layer="360" to-port="0" />
<edge from-layer="360" from-port="2" to-layer="362" to-port="0" />
<edge from-layer="361" from-port="0" to-layer="362" to-port="1" />
<edge from-layer="362" from-port="2" to-layer="364" to-port="0" />
<edge from-layer="363" from-port="0" to-layer="364" to-port="1" />
<edge from-layer="364" from-port="2" to-layer="366" to-port="0" />
<edge from-layer="365" from-port="0" to-layer="366" to-port="1" />
<edge from-layer="366" from-port="2" to-layer="389" to-port="0" />
<edge from-layer="366" from-port="2" to-layer="405" to-port="1" />
<edge from-layer="366" from-port="2" to-layer="376" to-port="0" />
<edge from-layer="366" from-port="2" to-layer="368" to-port="0" />
<edge from-layer="367" from-port="0" to-layer="368" to-port="1" />
<edge from-layer="368" from-port="2" to-layer="370" to-port="0" />
<edge from-layer="369" from-port="0" to-layer="370" to-port="1" />
<edge from-layer="370" from-port="2" to-layer="372" to-port="0" />
<edge from-layer="371" from-port="0" to-layer="372" to-port="1" />
<edge from-layer="372" from-port="2" to-layer="374" to-port="0" />
<edge from-layer="373" from-port="0" to-layer="374" to-port="1" />
<edge from-layer="374" from-port="2" to-layer="383" to-port="0" />
<edge from-layer="375" from-port="0" to-layer="376" to-port="1" />
<edge from-layer="376" from-port="2" to-layer="378" to-port="0" />
<edge from-layer="377" from-port="0" to-layer="378" to-port="1" />
<edge from-layer="378" from-port="2" to-layer="380" to-port="0" />
<edge from-layer="379" from-port="0" to-layer="380" to-port="1" />
<edge from-layer="380" from-port="2" to-layer="382" to-port="0" />
<edge from-layer="381" from-port="0" to-layer="382" to-port="1" />
<edge from-layer="382" from-port="2" to-layer="383" to-port="1" />
<edge from-layer="383" from-port="2" to-layer="385" to-port="0" />
<edge from-layer="384" from-port="0" to-layer="385" to-port="1" />
<edge from-layer="385" from-port="2" to-layer="386" to-port="0" />
<edge from-layer="386" from-port="2" to-layer="387" to-port="0" />
<edge from-layer="387" from-port="1" to-layer="396" to-port="0" />
<edge from-layer="388" from-port="0" to-layer="389" to-port="1" />
<edge from-layer="389" from-port="2" to-layer="391" to-port="0" />
<edge from-layer="390" from-port="0" to-layer="391" to-port="1" />
<edge from-layer="391" from-port="2" to-layer="393" to-port="0" />
<edge from-layer="392" from-port="0" to-layer="393" to-port="1" />
<edge from-layer="393" from-port="2" to-layer="395" to-port="0" />
<edge from-layer="394" from-port="0" to-layer="395" to-port="1" />
<edge from-layer="395" from-port="2" to-layer="396" to-port="1" />
<edge from-layer="396" from-port="2" to-layer="398" to-port="0" />
<edge from-layer="397" from-port="0" to-layer="398" to-port="1" />
<edge from-layer="398" from-port="2" to-layer="400" to-port="0" />
<edge from-layer="399" from-port="0" to-layer="400" to-port="1" />
<edge from-layer="400" from-port="2" to-layer="402" to-port="0" />
<edge from-layer="401" from-port="0" to-layer="402" to-port="1" />
<edge from-layer="402" from-port="2" to-layer="404" to-port="0" />
<edge from-layer="403" from-port="0" to-layer="404" to-port="1" />
<edge from-layer="404" from-port="2" to-layer="405" to-port="0" />
<edge from-layer="405" from-port="2" to-layer="407" to-port="0" />
<edge from-layer="406" from-port="0" to-layer="407" to-port="1" />
<edge from-layer="407" from-port="2" to-layer="409" to-port="0" />
<edge from-layer="408" from-port="0" to-layer="409" to-port="1" />
<edge from-layer="409" from-port="2" to-layer="411" to-port="0" />
<edge from-layer="410" from-port="0" to-layer="411" to-port="1" />
<edge from-layer="411" from-port="2" to-layer="413" to-port="0" />
<edge from-layer="411" from-port="2" to-layer="421" to-port="1" />
<edge from-layer="412" from-port="0" to-layer="413" to-port="1" />
<edge from-layer="413" from-port="2" to-layer="415" to-port="0" />
<edge from-layer="414" from-port="0" to-layer="415" to-port="1" />
<edge from-layer="415" from-port="2" to-layer="416" to-port="0" />
<edge from-layer="416" from-port="1" to-layer="418" to-port="0" />
<edge from-layer="417" from-port="0" to-layer="418" to-port="1" />
<edge from-layer="418" from-port="2" to-layer="420" to-port="0" />
<edge from-layer="419" from-port="0" to-layer="420" to-port="1" />
<edge from-layer="420" from-port="2" to-layer="421" to-port="0" />
<edge from-layer="421" from-port="2" to-layer="423" to-port="0" />
<edge from-layer="422" from-port="0" to-layer="423" to-port="1" />
<edge from-layer="423" from-port="2" to-layer="425" to-port="0" />
<edge from-layer="424" from-port="0" to-layer="425" to-port="1" />
<edge from-layer="425" from-port="2" to-layer="427" to-port="0" />
<edge from-layer="426" from-port="0" to-layer="427" to-port="1" />
<edge from-layer="427" from-port="2" to-layer="437" to-port="0" />
<edge from-layer="427" from-port="2" to-layer="450" to-port="0" />
<edge from-layer="427" from-port="2" to-layer="429" to-port="0" />
<edge from-layer="427" from-port="2" to-layer="466" to-port="1" />
<edge from-layer="428" from-port="0" to-layer="429" to-port="1" />
<edge from-layer="429" from-port="2" to-layer="431" to-port="0" />
<edge from-layer="430" from-port="0" to-layer="431" to-port="1" />
<edge from-layer="431" from-port="2" to-layer="433" to-port="0" />
<edge from-layer="432" from-port="0" to-layer="433" to-port="1" />
<edge from-layer="433" from-port="2" to-layer="435" to-port="0" />
<edge from-layer="434" from-port="0" to-layer="435" to-port="1" />
<edge from-layer="435" from-port="2" to-layer="444" to-port="0" />
<edge from-layer="436" from-port="0" to-layer="437" to-port="1" />
<edge from-layer="437" from-port="2" to-layer="439" to-port="0" />
<edge from-layer="438" from-port="0" to-layer="439" to-port="1" />
<edge from-layer="439" from-port="2" to-layer="441" to-port="0" />
<edge from-layer="440" from-port="0" to-layer="441" to-port="1" />
<edge from-layer="441" from-port="2" to-layer="443" to-port="0" />
<edge from-layer="442" from-port="0" to-layer="443" to-port="1" />
<edge from-layer="443" from-port="2" to-layer="444" to-port="1" />
<edge from-layer="444" from-port="2" to-layer="446" to-port="0" />
<edge from-layer="445" from-port="0" to-layer="446" to-port="1" />
<edge from-layer="446" from-port="2" to-layer="447" to-port="0" />
<edge from-layer="447" from-port="2" to-layer="448" to-port="0" />
<edge from-layer="448" from-port="1" to-layer="457" to-port="0" />
<edge from-layer="449" from-port="0" to-layer="450" to-port="1" />
<edge from-layer="450" from-port="2" to-layer="452" to-port="0" />
<edge from-layer="451" from-port="0" to-layer="452" to-port="1" />
<edge from-layer="452" from-port="2" to-layer="454" to-port="0" />
<edge from-layer="453" from-port="0" to-layer="454" to-port="1" />
<edge from-layer="454" from-port="2" to-layer="456" to-port="0" />
<edge from-layer="455" from-port="0" to-layer="456" to-port="1" />
<edge from-layer="456" from-port="2" to-layer="457" to-port="1" />
<edge from-layer="457" from-port="2" to-layer="459" to-port="0" />
<edge from-layer="458" from-port="0" to-layer="459" to-port="1" />
<edge from-layer="459" from-port="2" to-layer="461" to-port="0" />
<edge from-layer="460" from-port="0" to-layer="461" to-port="1" />
<edge from-layer="461" from-port="2" to-layer="463" to-port="0" />
<edge from-layer="462" from-port="0" to-layer="463" to-port="1" />
<edge from-layer="463" from-port="2" to-layer="465" to-port="0" />
<edge from-layer="464" from-port="0" to-layer="465" to-port="1" />
<edge from-layer="465" from-port="2" to-layer="466" to-port="0" />
<edge from-layer="466" from-port="2" to-layer="468" to-port="0" />
<edge from-layer="467" from-port="0" to-layer="468" to-port="1" />
<edge from-layer="468" from-port="2" to-layer="470" to-port="0" />
<edge from-layer="469" from-port="0" to-layer="470" to-port="1" />
<edge from-layer="470" from-port="2" to-layer="472" to-port="0" />
<edge from-layer="471" from-port="0" to-layer="472" to-port="1" />
<edge from-layer="472" from-port="2" to-layer="482" to-port="1" />
<edge from-layer="472" from-port="2" to-layer="474" to-port="0" />
<edge from-layer="473" from-port="0" to-layer="474" to-port="1" />
<edge from-layer="474" from-port="2" to-layer="476" to-port="0" />
<edge from-layer="475" from-port="0" to-layer="476" to-port="1" />
<edge from-layer="476" from-port="2" to-layer="477" to-port="0" />
<edge from-layer="477" from-port="1" to-layer="479" to-port="0" />
<edge from-layer="478" from-port="0" to-layer="479" to-port="1" />
<edge from-layer="479" from-port="2" to-layer="481" to-port="0" />
<edge from-layer="480" from-port="0" to-layer="481" to-port="1" />
<edge from-layer="481" from-port="2" to-layer="482" to-port="0" />
<edge from-layer="482" from-port="2" to-layer="484" to-port="0" />
<edge from-layer="483" from-port="0" to-layer="484" to-port="1" />
<edge from-layer="484" from-port="2" to-layer="486" to-port="0" />
<edge from-layer="485" from-port="0" to-layer="486" to-port="1" />
<edge from-layer="486" from-port="2" to-layer="488" to-port="0" />
<edge from-layer="487" from-port="0" to-layer="488" to-port="1" />
<edge from-layer="488" from-port="2" to-layer="511" to-port="0" />
<edge from-layer="488" from-port="2" to-layer="527" to-port="1" />
<edge from-layer="488" from-port="2" to-layer="498" to-port="0" />
<edge from-layer="488" from-port="2" to-layer="490" to-port="0" />
<edge from-layer="489" from-port="0" to-layer="490" to-port="1" />
<edge from-layer="490" from-port="2" to-layer="492" to-port="0" />
<edge from-layer="491" from-port="0" to-layer="492" to-port="1" />
<edge from-layer="492" from-port="2" to-layer="494" to-port="0" />
<edge from-layer="493" from-port="0" to-layer="494" to-port="1" />
<edge from-layer="494" from-port="2" to-layer="496" to-port="0" />
<edge from-layer="495" from-port="0" to-layer="496" to-port="1" />
<edge from-layer="496" from-port="2" to-layer="505" to-port="0" />
<edge from-layer="497" from-port="0" to-layer="498" to-port="1" />
<edge from-layer="498" from-port="2" to-layer="500" to-port="0" />
<edge from-layer="499" from-port="0" to-layer="500" to-port="1" />
<edge from-layer="500" from-port="2" to-layer="502" to-port="0" />
<edge from-layer="501" from-port="0" to-layer="502" to-port="1" />
<edge from-layer="502" from-port="2" to-layer="504" to-port="0" />
<edge from-layer="503" from-port="0" to-layer="504" to-port="1" />
<edge from-layer="504" from-port="2" to-layer="505" to-port="1" />
<edge from-layer="505" from-port="2" to-layer="507" to-port="0" />
<edge from-layer="506" from-port="0" to-layer="507" to-port="1" />
<edge from-layer="507" from-port="2" to-layer="508" to-port="0" />
<edge from-layer="508" from-port="2" to-layer="509" to-port="0" />
<edge from-layer="509" from-port="1" to-layer="518" to-port="0" />
<edge from-layer="510" from-port="0" to-layer="511" to-port="1" />
<edge from-layer="511" from-port="2" to-layer="513" to-port="0" />
<edge from-layer="512" from-port="0" to-layer="513" to-port="1" />
<edge from-layer="513" from-port="2" to-layer="515" to-port="0" />
<edge from-layer="514" from-port="0" to-layer="515" to-port="1" />
<edge from-layer="515" from-port="2" to-layer="517" to-port="0" />
<edge from-layer="516" from-port="0" to-layer="517" to-port="1" />
<edge from-layer="517" from-port="2" to-layer="518" to-port="1" />
<edge from-layer="518" from-port="2" to-layer="520" to-port="0" />
<edge from-layer="519" from-port="0" to-layer="520" to-port="1" />
<edge from-layer="520" from-port="2" to-layer="522" to-port="0" />
<edge from-layer="521" from-port="0" to-layer="522" to-port="1" />
<edge from-layer="522" from-port="2" to-layer="524" to-port="0" />
<edge from-layer="523" from-port="0" to-layer="524" to-port="1" />
<edge from-layer="524" from-port="2" to-layer="526" to-port="0" />
<edge from-layer="525" from-port="0" to-layer="526" to-port="1" />
<edge from-layer="526" from-port="2" to-layer="527" to-port="0" />
<edge from-layer="527" from-port="2" to-layer="529" to-port="0" />
<edge from-layer="528" from-port="0" to-layer="529" to-port="1" />
<edge from-layer="529" from-port="2" to-layer="531" to-port="0" />
<edge from-layer="530" from-port="0" to-layer="531" to-port="1" />
<edge from-layer="531" from-port="2" to-layer="533" to-port="0" />
<edge from-layer="532" from-port="0" to-layer="533" to-port="1" />
<edge from-layer="533" from-port="2" to-layer="535" to-port="0" />
<edge from-layer="533" from-port="2" to-layer="543" to-port="1" />
<edge from-layer="534" from-port="0" to-layer="535" to-port="1" />
<edge from-layer="535" from-port="2" to-layer="537" to-port="0" />
<edge from-layer="536" from-port="0" to-layer="537" to-port="1" />
<edge from-layer="537" from-port="2" to-layer="538" to-port="0" />
<edge from-layer="538" from-port="1" to-layer="540" to-port="0" />
<edge from-layer="539" from-port="0" to-layer="540" to-port="1" />
<edge from-layer="540" from-port="2" to-layer="542" to-port="0" />
<edge from-layer="541" from-port="0" to-layer="542" to-port="1" />
<edge from-layer="542" from-port="2" to-layer="543" to-port="0" />
<edge from-layer="543" from-port="2" to-layer="545" to-port="0" />
<edge from-layer="544" from-port="0" to-layer="545" to-port="1" />
<edge from-layer="545" from-port="2" to-layer="547" to-port="0" />
<edge from-layer="546" from-port="0" to-layer="547" to-port="1" />
<edge from-layer="547" from-port="2" to-layer="549" to-port="0" />
<edge from-layer="548" from-port="0" to-layer="549" to-port="1" />
<edge from-layer="549" from-port="2" to-layer="588" to-port="1" />
<edge from-layer="549" from-port="2" to-layer="572" to-port="0" />
<edge from-layer="549" from-port="2" to-layer="559" to-port="0" />
<edge from-layer="549" from-port="2" to-layer="551" to-port="0" />
<edge from-layer="550" from-port="0" to-layer="551" to-port="1" />
<edge from-layer="551" from-port="2" to-layer="553" to-port="0" />
<edge from-layer="552" from-port="0" to-layer="553" to-port="1" />
<edge from-layer="553" from-port="2" to-layer="555" to-port="0" />
<edge from-layer="554" from-port="0" to-layer="555" to-port="1" />
<edge from-layer="555" from-port="2" to-layer="557" to-port="0" />
<edge from-layer="556" from-port="0" to-layer="557" to-port="1" />
<edge from-layer="557" from-port="2" to-layer="566" to-port="0" />
<edge from-layer="558" from-port="0" to-layer="559" to-port="1" />
<edge from-layer="559" from-port="2" to-layer="561" to-port="0" />
<edge from-layer="560" from-port="0" to-layer="561" to-port="1" />
<edge from-layer="561" from-port="2" to-layer="563" to-port="0" />
<edge from-layer="562" from-port="0" to-layer="563" to-port="1" />
<edge from-layer="563" from-port="2" to-layer="565" to-port="0" />
<edge from-layer="564" from-port="0" to-layer="565" to-port="1" />
<edge from-layer="565" from-port="2" to-layer="566" to-port="1" />
<edge from-layer="566" from-port="2" to-layer="568" to-port="0" />
<edge from-layer="567" from-port="0" to-layer="568" to-port="1" />
<edge from-layer="568" from-port="2" to-layer="569" to-port="0" />
<edge from-layer="569" from-port="2" to-layer="570" to-port="0" />
<edge from-layer="570" from-port="1" to-layer="579" to-port="0" />
<edge from-layer="571" from-port="0" to-layer="572" to-port="1" />
<edge from-layer="572" from-port="2" to-layer="574" to-port="0" />
<edge from-layer="573" from-port="0" to-layer="574" to-port="1" />
<edge from-layer="574" from-port="2" to-layer="576" to-port="0" />
<edge from-layer="575" from-port="0" to-layer="576" to-port="1" />
<edge from-layer="576" from-port="2" to-layer="578" to-port="0" />
<edge from-layer="577" from-port="0" to-layer="578" to-port="1" />
<edge from-layer="578" from-port="2" to-layer="579" to-port="1" />
<edge from-layer="579" from-port="2" to-layer="581" to-port="0" />
<edge from-layer="580" from-port="0" to-layer="581" to-port="1" />
<edge from-layer="581" from-port="2" to-layer="583" to-port="0" />
<edge from-layer="582" from-port="0" to-layer="583" to-port="1" />
<edge from-layer="583" from-port="2" to-layer="585" to-port="0" />
<edge from-layer="584" from-port="0" to-layer="585" to-port="1" />
<edge from-layer="585" from-port="2" to-layer="587" to-port="0" />
<edge from-layer="586" from-port="0" to-layer="587" to-port="1" />
<edge from-layer="587" from-port="2" to-layer="588" to-port="0" />
<edge from-layer="588" from-port="2" to-layer="590" to-port="0" />
<edge from-layer="589" from-port="0" to-layer="590" to-port="1" />
<edge from-layer="590" from-port="2" to-layer="592" to-port="0" />
<edge from-layer="591" from-port="0" to-layer="592" to-port="1" />
<edge from-layer="592" from-port="2" to-layer="594" to-port="0" />
<edge from-layer="593" from-port="0" to-layer="594" to-port="1" />
<edge from-layer="594" from-port="2" to-layer="604" to-port="1" />
<edge from-layer="594" from-port="2" to-layer="596" to-port="0" />
<edge from-layer="595" from-port="0" to-layer="596" to-port="1" />
<edge from-layer="596" from-port="2" to-layer="598" to-port="0" />
<edge from-layer="597" from-port="0" to-layer="598" to-port="1" />
<edge from-layer="598" from-port="2" to-layer="599" to-port="0" />
<edge from-layer="599" from-port="1" to-layer="601" to-port="0" />
<edge from-layer="600" from-port="0" to-layer="601" to-port="1" />
<edge from-layer="601" from-port="2" to-layer="603" to-port="0" />
<edge from-layer="602" from-port="0" to-layer="603" to-port="1" />
<edge from-layer="603" from-port="2" to-layer="604" to-port="0" />
<edge from-layer="604" from-port="2" to-layer="606" to-port="0" />
<edge from-layer="605" from-port="0" to-layer="606" to-port="1" />
<edge from-layer="606" from-port="2" to-layer="608" to-port="0" />
<edge from-layer="607" from-port="0" to-layer="608" to-port="1" />
<edge from-layer="608" from-port="2" to-layer="610" to-port="0" />
<edge from-layer="609" from-port="0" to-layer="610" to-port="1" />
<edge from-layer="610" from-port="2" to-layer="649" to-port="1" />
<edge from-layer="610" from-port="2" to-layer="633" to-port="0" />
<edge from-layer="610" from-port="2" to-layer="612" to-port="0" />
<edge from-layer="610" from-port="2" to-layer="620" to-port="0" />
<edge from-layer="611" from-port="0" to-layer="612" to-port="1" />
<edge from-layer="612" from-port="2" to-layer="614" to-port="0" />
<edge from-layer="613" from-port="0" to-layer="614" to-port="1" />
<edge from-layer="614" from-port="2" to-layer="616" to-port="0" />
<edge from-layer="615" from-port="0" to-layer="616" to-port="1" />
<edge from-layer="616" from-port="2" to-layer="618" to-port="0" />
<edge from-layer="617" from-port="0" to-layer="618" to-port="1" />
<edge from-layer="618" from-port="2" to-layer="627" to-port="0" />
<edge from-layer="619" from-port="0" to-layer="620" to-port="1" />
<edge from-layer="620" from-port="2" to-layer="622" to-port="0" />
<edge from-layer="621" from-port="0" to-layer="622" to-port="1" />
<edge from-layer="622" from-port="2" to-layer="624" to-port="0" />
<edge from-layer="623" from-port="0" to-layer="624" to-port="1" />
<edge from-layer="624" from-port="2" to-layer="626" to-port="0" />
<edge from-layer="625" from-port="0" to-layer="626" to-port="1" />
<edge from-layer="626" from-port="2" to-layer="627" to-port="1" />
<edge from-layer="627" from-port="2" to-layer="629" to-port="0" />
<edge from-layer="628" from-port="0" to-layer="629" to-port="1" />
<edge from-layer="629" from-port="2" to-layer="630" to-port="0" />
<edge from-layer="630" from-port="2" to-layer="631" to-port="0" />
<edge from-layer="631" from-port="1" to-layer="640" to-port="0" />
<edge from-layer="632" from-port="0" to-layer="633" to-port="1" />
<edge from-layer="633" from-port="2" to-layer="635" to-port="0" />
<edge from-layer="634" from-port="0" to-layer="635" to-port="1" />
<edge from-layer="635" from-port="2" to-layer="637" to-port="0" />
<edge from-layer="636" from-port="0" to-layer="637" to-port="1" />
<edge from-layer="637" from-port="2" to-layer="639" to-port="0" />
<edge from-layer="638" from-port="0" to-layer="639" to-port="1" />
<edge from-layer="639" from-port="2" to-layer="640" to-port="1" />
<edge from-layer="640" from-port="2" to-layer="642" to-port="0" />
<edge from-layer="641" from-port="0" to-layer="642" to-port="1" />
<edge from-layer="642" from-port="2" to-layer="644" to-port="0" />
<edge from-layer="643" from-port="0" to-layer="644" to-port="1" />
<edge from-layer="644" from-port="2" to-layer="646" to-port="0" />
<edge from-layer="645" from-port="0" to-layer="646" to-port="1" />
<edge from-layer="646" from-port="2" to-layer="648" to-port="0" />
<edge from-layer="647" from-port="0" to-layer="648" to-port="1" />
<edge from-layer="648" from-port="2" to-layer="649" to-port="0" />
<edge from-layer="649" from-port="2" to-layer="651" to-port="0" />
<edge from-layer="650" from-port="0" to-layer="651" to-port="1" />
<edge from-layer="651" from-port="2" to-layer="653" to-port="0" />
<edge from-layer="652" from-port="0" to-layer="653" to-port="1" />
<edge from-layer="653" from-port="2" to-layer="655" to-port="0" />
<edge from-layer="654" from-port="0" to-layer="655" to-port="1" />
<edge from-layer="655" from-port="2" to-layer="657" to-port="0" />
<edge from-layer="655" from-port="2" to-layer="665" to-port="1" />
<edge from-layer="656" from-port="0" to-layer="657" to-port="1" />
<edge from-layer="657" from-port="2" to-layer="659" to-port="0" />
<edge from-layer="658" from-port="0" to-layer="659" to-port="1" />
<edge from-layer="659" from-port="2" to-layer="660" to-port="0" />
<edge from-layer="660" from-port="1" to-layer="662" to-port="0" />
<edge from-layer="661" from-port="0" to-layer="662" to-port="1" />
<edge from-layer="662" from-port="2" to-layer="664" to-port="0" />
<edge from-layer="663" from-port="0" to-layer="664" to-port="1" />
<edge from-layer="664" from-port="2" to-layer="665" to-port="0" />
<edge from-layer="665" from-port="2" to-layer="667" to-port="0" />
<edge from-layer="666" from-port="0" to-layer="667" to-port="1" />
<edge from-layer="667" from-port="2" to-layer="669" to-port="0" />
<edge from-layer="668" from-port="0" to-layer="669" to-port="1" />
<edge from-layer="669" from-port="2" to-layer="671" to-port="0" />
<edge from-layer="670" from-port="0" to-layer="671" to-port="1" />
<edge from-layer="671" from-port="2" to-layer="710" to-port="1" />
<edge from-layer="671" from-port="2" to-layer="694" to-port="0" />
<edge from-layer="671" from-port="2" to-layer="681" to-port="0" />
<edge from-layer="671" from-port="2" to-layer="673" to-port="0" />
<edge from-layer="672" from-port="0" to-layer="673" to-port="1" />
<edge from-layer="673" from-port="2" to-layer="675" to-port="0" />
<edge from-layer="674" from-port="0" to-layer="675" to-port="1" />
<edge from-layer="675" from-port="2" to-layer="677" to-port="0" />
<edge from-layer="676" from-port="0" to-layer="677" to-port="1" />
<edge from-layer="677" from-port="2" to-layer="679" to-port="0" />
<edge from-layer="678" from-port="0" to-layer="679" to-port="1" />
<edge from-layer="679" from-port="2" to-layer="688" to-port="0" />
<edge from-layer="680" from-port="0" to-layer="681" to-port="1" />
<edge from-layer="681" from-port="2" to-layer="683" to-port="0" />
<edge from-layer="682" from-port="0" to-layer="683" to-port="1" />
<edge from-layer="683" from-port="2" to-layer="685" to-port="0" />
<edge from-layer="684" from-port="0" to-layer="685" to-port="1" />
<edge from-layer="685" from-port="2" to-layer="687" to-port="0" />
<edge from-layer="686" from-port="0" to-layer="687" to-port="1" />
<edge from-layer="687" from-port="2" to-layer="688" to-port="1" />
<edge from-layer="688" from-port="2" to-layer="690" to-port="0" />
<edge from-layer="689" from-port="0" to-layer="690" to-port="1" />
<edge from-layer="690" from-port="2" to-layer="691" to-port="0" />
<edge from-layer="691" from-port="2" to-layer="692" to-port="0" />
<edge from-layer="692" from-port="1" to-layer="701" to-port="0" />
<edge from-layer="693" from-port="0" to-layer="694" to-port="1" />
<edge from-layer="694" from-port="2" to-layer="696" to-port="0" />
<edge from-layer="695" from-port="0" to-layer="696" to-port="1" />
<edge from-layer="696" from-port="2" to-layer="698" to-port="0" />
<edge from-layer="697" from-port="0" to-layer="698" to-port="1" />
<edge from-layer="698" from-port="2" to-layer="700" to-port="0" />
<edge from-layer="699" from-port="0" to-layer="700" to-port="1" />
<edge from-layer="700" from-port="2" to-layer="701" to-port="1" />
<edge from-layer="701" from-port="2" to-layer="703" to-port="0" />
<edge from-layer="702" from-port="0" to-layer="703" to-port="1" />
<edge from-layer="703" from-port="2" to-layer="705" to-port="0" />
<edge from-layer="704" from-port="0" to-layer="705" to-port="1" />
<edge from-layer="705" from-port="2" to-layer="707" to-port="0" />
<edge from-layer="706" from-port="0" to-layer="707" to-port="1" />
<edge from-layer="707" from-port="2" to-layer="709" to-port="0" />
<edge from-layer="708" from-port="0" to-layer="709" to-port="1" />
<edge from-layer="709" from-port="2" to-layer="710" to-port="0" />
<edge from-layer="710" from-port="2" to-layer="712" to-port="0" />
<edge from-layer="711" from-port="0" to-layer="712" to-port="1" />
<edge from-layer="712" from-port="2" to-layer="714" to-port="0" />
<edge from-layer="713" from-port="0" to-layer="714" to-port="1" />
<edge from-layer="714" from-port="2" to-layer="716" to-port="0" />
<edge from-layer="715" from-port="0" to-layer="716" to-port="1" />
<edge from-layer="716" from-port="2" to-layer="726" to-port="1" />
<edge from-layer="716" from-port="2" to-layer="718" to-port="0" />
<edge from-layer="717" from-port="0" to-layer="718" to-port="1" />
<edge from-layer="718" from-port="2" to-layer="720" to-port="0" />
<edge from-layer="719" from-port="0" to-layer="720" to-port="1" />
<edge from-layer="720" from-port="2" to-layer="721" to-port="0" />
<edge from-layer="721" from-port="1" to-layer="723" to-port="0" />
<edge from-layer="722" from-port="0" to-layer="723" to-port="1" />
<edge from-layer="723" from-port="2" to-layer="725" to-port="0" />
<edge from-layer="724" from-port="0" to-layer="725" to-port="1" />
<edge from-layer="725" from-port="2" to-layer="726" to-port="0" />
<edge from-layer="726" from-port="2" to-layer="728" to-port="0" />
<edge from-layer="727" from-port="0" to-layer="728" to-port="1" />
<edge from-layer="728" from-port="2" to-layer="730" to-port="0" />
<edge from-layer="729" from-port="0" to-layer="730" to-port="1" />
<edge from-layer="730" from-port="2" to-layer="732" to-port="0" />
<edge from-layer="731" from-port="0" to-layer="732" to-port="1" />
<edge from-layer="732" from-port="2" to-layer="734" to-port="0" />
<edge from-layer="732" from-port="2" to-layer="771" to-port="1" />
<edge from-layer="732" from-port="2" to-layer="755" to-port="0" />
<edge from-layer="732" from-port="2" to-layer="742" to-port="0" />
<edge from-layer="733" from-port="0" to-layer="734" to-port="1" />
<edge from-layer="734" from-port="2" to-layer="736" to-port="0" />
<edge from-layer="735" from-port="0" to-layer="736" to-port="1" />
<edge from-layer="736" from-port="2" to-layer="738" to-port="0" />
<edge from-layer="737" from-port="0" to-layer="738" to-port="1" />
<edge from-layer="738" from-port="2" to-layer="740" to-port="0" />
<edge from-layer="739" from-port="0" to-layer="740" to-port="1" />
<edge from-layer="740" from-port="2" to-layer="749" to-port="0" />
<edge from-layer="741" from-port="0" to-layer="742" to-port="1" />
<edge from-layer="742" from-port="2" to-layer="744" to-port="0" />
<edge from-layer="743" from-port="0" to-layer="744" to-port="1" />
<edge from-layer="744" from-port="2" to-layer="746" to-port="0" />
<edge from-layer="745" from-port="0" to-layer="746" to-port="1" />
<edge from-layer="746" from-port="2" to-layer="748" to-port="0" />
<edge from-layer="747" from-port="0" to-layer="748" to-port="1" />
<edge from-layer="748" from-port="2" to-layer="749" to-port="1" />
<edge from-layer="749" from-port="2" to-layer="751" to-port="0" />
<edge from-layer="750" from-port="0" to-layer="751" to-port="1" />
<edge from-layer="751" from-port="2" to-layer="752" to-port="0" />
<edge from-layer="752" from-port="2" to-layer="753" to-port="0" />
<edge from-layer="753" from-port="1" to-layer="762" to-port="0" />
<edge from-layer="754" from-port="0" to-layer="755" to-port="1" />
<edge from-layer="755" from-port="2" to-layer="757" to-port="0" />
<edge from-layer="756" from-port="0" to-layer="757" to-port="1" />
<edge from-layer="757" from-port="2" to-layer="759" to-port="0" />
<edge from-layer="758" from-port="0" to-layer="759" to-port="1" />
<edge from-layer="759" from-port="2" to-layer="761" to-port="0" />
<edge from-layer="760" from-port="0" to-layer="761" to-port="1" />
<edge from-layer="761" from-port="2" to-layer="762" to-port="1" />
<edge from-layer="762" from-port="2" to-layer="764" to-port="0" />
<edge from-layer="763" from-port="0" to-layer="764" to-port="1" />
<edge from-layer="764" from-port="2" to-layer="766" to-port="0" />
<edge from-layer="765" from-port="0" to-layer="766" to-port="1" />
<edge from-layer="766" from-port="2" to-layer="768" to-port="0" />
<edge from-layer="767" from-port="0" to-layer="768" to-port="1" />
<edge from-layer="768" from-port="2" to-layer="770" to-port="0" />
<edge from-layer="769" from-port="0" to-layer="770" to-port="1" />
<edge from-layer="770" from-port="2" to-layer="771" to-port="0" />
<edge from-layer="771" from-port="2" to-layer="773" to-port="0" />
<edge from-layer="772" from-port="0" to-layer="773" to-port="1" />
<edge from-layer="773" from-port="2" to-layer="775" to-port="0" />
<edge from-layer="774" from-port="0" to-layer="775" to-port="1" />
<edge from-layer="775" from-port="2" to-layer="777" to-port="0" />
<edge from-layer="776" from-port="0" to-layer="777" to-port="1" />
<edge from-layer="777" from-port="2" to-layer="779" to-port="0" />
<edge from-layer="777" from-port="2" to-layer="787" to-port="1" />
<edge from-layer="778" from-port="0" to-layer="779" to-port="1" />
<edge from-layer="779" from-port="2" to-layer="781" to-port="0" />
<edge from-layer="780" from-port="0" to-layer="781" to-port="1" />
<edge from-layer="781" from-port="2" to-layer="782" to-port="0" />
<edge from-layer="782" from-port="1" to-layer="784" to-port="0" />
<edge from-layer="783" from-port="0" to-layer="784" to-port="1" />
<edge from-layer="784" from-port="2" to-layer="786" to-port="0" />
<edge from-layer="785" from-port="0" to-layer="786" to-port="1" />
<edge from-layer="786" from-port="2" to-layer="787" to-port="0" />
<edge from-layer="787" from-port="2" to-layer="789" to-port="0" />
<edge from-layer="788" from-port="0" to-layer="789" to-port="1" />
<edge from-layer="789" from-port="2" to-layer="791" to-port="0" />
<edge from-layer="790" from-port="0" to-layer="791" to-port="1" />
<edge from-layer="791" from-port="2" to-layer="793" to-port="0" />
<edge from-layer="792" from-port="0" to-layer="793" to-port="1" />
<edge from-layer="793" from-port="2" to-layer="796" to-port="0" />
<edge from-layer="794" from-port="0" to-layer="796" to-port="1" />
<edge from-layer="795" from-port="0" to-layer="796" to-port="2" />
<edge from-layer="796" from-port="3" to-layer="798" to-port="0" />
<edge from-layer="797" from-port="0" to-layer="798" to-port="1" />
<edge from-layer="798" from-port="2" to-layer="800" to-port="0" />
<edge from-layer="799" from-port="0" to-layer="800" to-port="1" />
<edge from-layer="800" from-port="2" to-layer="801" to-port="0" />
<edge from-layer="801" from-port="1" to-layer="803" to-port="0" />
<edge from-layer="802" from-port="0" to-layer="803" to-port="1" />
<edge from-layer="803" from-port="2" to-layer="805" to-port="0" />
<edge from-layer="804" from-port="0" to-layer="805" to-port="1" />
<edge from-layer="805" from-port="2" to-layer="806" to-port="0" />
</edges>
<rt_info>
<Runtime_version value="2024.1.0-15008-f4afc983258-releases/2024/1" />
<conversion_parameters>
<framework value="pytorch" />
<is_python_object value="True" />
</conversion_parameters>
<optimum>
<optimum_intel_version value="1.17.2" />
<optimum_version value="1.20.0" />
<pytorch_version value="2.3.1" />
<transformers_version value="4.41.2" />
</optimum>
</rt_info>
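<!--
     The conversion_parameters and optimum entries above record that this IR was
     exported from a PyTorch model through optimum-intel 1.17.2 (optimum 1.20.0,
     transformers 4.41.2, PyTorch 2.3.1) under OpenVINO 2024.1. A minimal sketch of
     how such an export is typically produced with the optimum-intel Python API
     follows; the model id "roberta-base" and the OVModelForSequenceClassification
     task class are illustrative assumptions, not values read from this file:

     from optimum.intel import OVModelForSequenceClassification

     # export=True converts the PyTorch checkpoint to OpenVINO IR on load and
     # embeds version metadata like the rt_info block above into the XML.
     ov_model = OVModelForSequenceClassification.from_pretrained(
         "roberta-base",  # assumed model id, for illustration only
         export=True,
     )
     ov_model.save_pretrained("ov_export")  # writes openvino_model.xml / .bin
-->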
</net>
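<!--
     A minimal sketch of loading and running this IR directly with the OpenVINO
     runtime version named in Runtime_version above (2024.1). The file name
     "openvino_model.xml", the input names "input_ids" / "attention_mask", and the
     example token values are assumptions for illustration, not read from this file:

     import numpy as np
     import openvino as ov

     core = ov.Core()
     model = core.read_model("openvino_model.xml")  # assumed file name
     compiled = core.compile_model(model, "CPU")

     # Dummy batch of one already-tokenized sequence (assumed input names and ids).
     feeds = {
         "input_ids": np.array([[0, 9064, 2]], dtype=np.int64),
         "attention_mask": np.ones((1, 3), dtype=np.int64),
     }
     results = compiled(feeds)             # one synchronous inference request
     logits = results[compiled.output(0)]  # first declared model output
-->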