/* COPYRIGHT HEADER GOES HERE: No CopyRight Header String Passed During Model Conversion */ /* Command Line used: qnn-onnx-converter; act_bitwidth=16; act_quantizer=tf; act_quantizer_calibration=min-max; act_quantizer_schema=asymmetric; adjust_nms_features_dims=True; algorithms=[]; align_matmul_ranks=True; apply_masked_softmax=uncompressed; arch_checker=False; backend=None; batch=None; bias_bitwidth=8; calc_static_encodings=False; converter_op_package_lib=; copyright_file=None; custom_io=; custom_op_config_paths=None; debug=-1; defer_loading=False; define_symbol=None; disable_batchnorm_folding=False; disable_defer_loading=False; disable_node_validation=False; disable_qnn_op_config_validation=False; disable_relu_squashing=False; dry_run=None; dumpIR=False; dump_custom_io_config_template=; dump_encoding_json=False; dump_inferred_model=False; dump_qairt_io_config_yaml=; dump_qairt_quantizer_command=None; dump_value_info=False; enable_framework_trace=False; enable_match_gathernd=False; enable_match_topk=False; enable_per_row_quantized_bias=False; exclude_named_tensors=False; expand_gru_op_structure=True; expand_lstm_op_structure=False; expand_sparse_op_structure=False; export_format=cpp; extract_color_transform=True; float_bias_bitwidth=0; float_bias_bw=0; float_bitwidth=32; float_bw=32; float_fallback=False; force_prune_cast_ops=False; handle_gather_negative_indices=True; ignore_encodings=False; include_data_invariant_ops=False; inject_cast_for_gather=True; input_dim=[['text_ids', '1,128'], ['style_dp', '1,8,16'], ['text_mask', '1,1,128']]; input_dtype=[]; input_encoding=[]; input_layout=[]; input_list=./calibration_data/duration_predictor_input_list.txt; input_type=[]; keep_disconnected_nodes=False; keep_int64_inputs=False; keep_quant_nodes=False; keep_weights_quantized=False; match_caffe_ssd_to_tf=True; model_version=None; multi_time_steps_gru=False; multi_time_steps_lstm=False; no_simplification=False; op_package_lib=; out_names=['duration'];
overwrite_model_prefix=False; pack_4_bit_weights=False; package_name=None; packed_masked_softmax_inputs=[]; packed_max_seq=1; param_quantizer=None; param_quantizer_calibration=min-max; param_quantizer_schema=asymmetric; percentile_calibration_value=99.99; perform_axes_to_spatial_first_order=True; perform_layout_transformation=False; prepare_inputs_as_params=False; preprocess_roi_pool_inputs=True; preserve_io=[]; preserve_onnx_output_order=False; quantization_overrides=; quantizer_log=None; restrict_quantization_steps=[]; squash_box_decoder=True; unroll_gru_time_steps=True; unroll_lstm_time_steps=True; use_aimet_quantizer=False; use_convert_quantization_nodes=False; use_dynamic_16_bit_weights=False; use_native_dtype=False; use_native_input_files=False; use_native_output_files=False; use_per_channel_quantization=False; use_per_row_quantization=False; use_quantize_v2=False; validate_models=False; weights_bitwidth=8 */
/* NOTE(review): machine-generated by qnn-onnx-converter (see command line above).
 * Do not hand-edit logic; regenerate from the ONNX model instead. Comments below
 * were added for readability only — all code tokens are unchanged. */
#include "QnnOpDef.h"
#include "QnnModel.hpp"

// Flag to determine if Backend should do node validation for each opNode added
#define DO_GRAPH_NODE_VALIDATIONS 1

using namespace qnn_wrapper_api;

/* Converter/SDK build identifier, exported with default visibility so tools can read it from the shared object. */
const __attribute__((visibility("default"))) char* QNN_SDK_VERSION = "qaisw-v2.37.1.250807093845_124904";

extern "C" {

/* Registers graph input "text_ids": INT32, shape [1, 128], app-written (QNN_TENSOR_TYPE_APP_WRITE); quantization undefined. */
static ModelError_t addTensor_text_ids(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_text_ids[] = {1, 128};
  VALIDATE(model.addTensor("text_ids", // Tensor Name
           (Qnn_Tensor_t) {
             .version= QNN_TENSOR_VERSION_2,
             {.v2= {
               .id=0, .name= "text_ids", .type= QNN_TENSOR_TYPE_APP_WRITE,
               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32,
               .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
               .rank= 2, .dimensions=dimensions_text_ids,
               .memType= QNN_TENSORMEMTYPE_RAW,
               {.clientBuf= { .data=nullptr, .dataSize=0}},
               .isDynamicDimensions= nullptr,
               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
               .isProduced= 0}}}
  ), err);
  return err;
}

/* Registers graph input "style_dp": UFIXED_POINT_16, shape [1, 16, 8], asymmetric scale/offset encoding (scale 1.83747e-05, offset -34360). */
static ModelError_t addTensor_style_dp(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_style_dp[] = {1, 16, 8};
  VALIDATE(model.addTensor("style_dp", // Tensor Name
           (Qnn_Tensor_t) {
             .version= QNN_TENSOR_VERSION_2,
             {.v2= {
               .id=0, .name= "style_dp", .type= QNN_TENSOR_TYPE_APP_WRITE,
               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16,
               .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000183746633411f, .offset= -34360}}},
               .rank= 3, .dimensions=dimensions_style_dp,
               .memType= QNN_TENSORMEMTYPE_RAW,
               {.clientBuf= { .data=nullptr, .dataSize=0}},
               .isDynamicDimensions= nullptr,
               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
               .isProduced= 0}}}
  ), err);
  return err;
}

/* Registers graph input "text_mask": UFIXED_POINT_16, shape [1, 128, 1], scale 1.52590e-05, offset 0. */
static ModelError_t addTensor_text_mask(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_text_mask[] = {1, 128, 1};
  VALIDATE(model.addTensor("text_mask", // Tensor Name
           (Qnn_Tensor_t) {
             .version= QNN_TENSOR_VERSION_2,
             {.v2= {
               .id=0, .name= "text_mask", .type= QNN_TENSOR_TYPE_APP_WRITE,
               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16,
               .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152590218931f, .offset= 0}}},
               .rank= 3, .dimensions=dimensions_text_mask,
               .memType= QNN_TENSORMEMTYPE_RAW,
               {.clientBuf= { .data=nullptr, .dataSize=0}},
               .isDynamicDimensions= nullptr,
               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
               .isProduced= 0}}}
  ), err);
  return err;
}

/* Transpose node "style_dp_ncf": permutes "style_dp" [1,16,8] with perm {0,2,1} -> [1,8,16]; output keeps the input's quant encoding. */
static ModelError_t addNode_style_dp_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR style_dp_ncf */
  uint32_t dimensions_style_dp_ncf_perm[] = {3};
  uint32_t
style_dp_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params_style_dp_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "style_dp_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_style_dp_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)style_dp_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs_style_dp_ncf[] = { "style_dp" }; uint32_t dimensions_style_dp_ncf[] = {1, 8, 16}; Qnn_Tensor_t outputs_style_dp_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "style_dp_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000183746633411f, .offset= -34360}}}, .rank= 3, .dimensions=dimensions_style_dp_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "style_dp_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params_style_dp_ncf, // Node Params 1, // Num Node Params inputs_style_dp_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs_style_dp_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode_text_mask_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR text_mask_ncf */ uint32_t dimensions_text_mask_ncf_perm[] = {3}; uint32_t text_mask_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params_text_mask_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "text_mask_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_text_mask_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)text_mask_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs_text_mask_ncf[] = { "text_mask" }; uint32_t dimensions_text_mask_ncf[] = {1, 1, 128}; Qnn_Tensor_t outputs_text_mask_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "text_mask_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152590218931f, .offset= 0}}}, .rank= 3, .dimensions=dimensions_text_mask_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "text_mask_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params_text_mask_ncf, // Node Params 1, // Num Node Params 
inputs_text_mask_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs_text_mask_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_text_embedder_char_embedder_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_text_embedder_char_embedder_weight[] = {163, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_text_embedder_char_embedder_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_text_embedder_char_embedder_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000220268684643f, .offset= -34039}}}, .rank= 2, .dimensions=dimensions_tts_dp_sentence_encoder_text_embedder_char_embedder_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_text_embedder_char_embedder_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_text_embedder_char_embedder_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_text_embedder_char_embedder_Gather(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_text_embedder_char_embedder_Gather */ Qnn_Param_t params__sentence_encoder_text_embedder_char_embedder_Gather[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_INT_32, {.int32Value = 0}}}} }; const char* inputs__sentence_encoder_text_embedder_char_embedder_Gather[] = { "tts_dp_sentence_encoder_text_embedder_char_embedder_weight", "text_ids" }; uint32_t 
dimensions__sentence_encoder_text_embedder_char_embedder_Gather_output_0[] = {1, 128, 64};
  Qnn_Tensor_t outputs__sentence_encoder_text_embedder_char_embedder_Gather[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_sentence_encoder_text_embedder_char_embedder_Gather_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000220268684643f, .offset= -34039}}},
        .rank= 3, .dimensions=dimensions__sentence_encoder_text_embedder_char_embedder_Gather_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_text_embedder_char_embedder_Gather", // Node Name
                         "qti.aisw", // Package Name
                         "Gather", // Qnn Node Type
                         params__sentence_encoder_text_embedder_char_embedder_Gather, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_text_embedder_char_embedder_Gather, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__sentence_encoder_text_embedder_char_embedder_Gather, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose node: embeddings [1,128,64] with perm {0,2,1} -> channel-first [1,64,128]; quant encoding carried through from the Gather output. */
static ModelError_t addNode__sentence_encoder_text_embedder_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_text_embedder_Transpose */
  uint32_t dimensions__sentence_encoder_text_embedder_Transpose_perm[] = {3};
  uint32_t _sentence_encoder_text_embedder_Transpose_perm[] = {0, 2, 1};
  Qnn_Param_t params__sentence_encoder_text_embedder_Transpose[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0, .name= "_sentence_encoder_text_embedder_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 1, .dimensions=dimensions__sentence_encoder_text_embedder_Transpose_perm,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_sentence_encoder_text_embedder_Transpose_perm, .dataSize=12}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_text_embedder_Transpose[] = {
    "_sentence_encoder_text_embedder_char_embedder_Gather_output_0"
  };
  uint32_t dimensions__sentence_encoder_text_embedder_Transpose_output_0[] = {1, 64, 128};
  Qnn_Tensor_t outputs__sentence_encoder_text_embedder_Transpose[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_sentence_encoder_text_embedder_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000220268684643f, .offset= -34039}}},
        .rank= 3, .dimensions=dimensions__sentence_encoder_text_embedder_Transpose_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_text_embedder_Transpose", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__sentence_encoder_text_embedder_Transpose, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_text_embedder_Transpose, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_text_embedder_Transpose, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* ElementWiseBinary node (operation=13 /* multiply per converter's ElementWiseBinary enum — confirm against QnnOpDef.h */): masks embeddings [1,64,128] with "text_mask_ncf" [1,1,128] (broadcast) -> [1,64,128]. */
static ModelError_t addNode__sentence_encoder_text_embedder_Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_text_embedder_Mul */
  Qnn_Param_t params__sentence_encoder_text_embedder_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__sentence_encoder_text_embedder_Mul[] = {
    "_sentence_encoder_text_embedder_Transpose_output_0",
    "text_mask_ncf"
  };
  uint32_t dimensions__sentence_encoder_text_embedder_Mul_output_0[] = {1, 64, 128};
  Qnn_Tensor_t outputs__sentence_encoder_text_embedder_Mul[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_sentence_encoder_text_embedder_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000095646382761f, .offset= -35435}}},
        .rank= 3, .dimensions=dimensions__sentence_encoder_text_embedder_Mul_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_text_embedder_Mul", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__sentence_encoder_text_embedder_Mul, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_text_embedder_Mul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__sentence_encoder_text_embedder_Mul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose node: masked embeddings [1,64,128] back to feature-last [1,128,64] (perm {0,2,1}). */
static ModelError_t addNode__sentence_encoder_text_embedder_Mul_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_text_embedder_Mul_output_0_nfc */
  uint32_t dimensions__sentence_encoder_text_embedder_Mul_output_0_nfc_perm[] = {3};
  uint32_t _sentence_encoder_text_embedder_Mul_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__sentence_encoder_text_embedder_Mul_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0, .name= "_sentence_encoder_text_embedder_Mul_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 1, .dimensions=dimensions__sentence_encoder_text_embedder_Mul_output_0_nfc_perm,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_sentence_encoder_text_embedder_Mul_output_0_nfc_perm, .dataSize=12}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_text_embedder_Mul_output_0_nfc[] = {
    "_sentence_encoder_text_embedder_Mul_output_0"
  };
  uint32_t dimensions__sentence_encoder_text_embedder_Mul_output_0_nfc[] = {1, 128, 64};
  Qnn_Tensor_t outputs__sentence_encoder_text_embedder_Mul_output_0_nfc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_sentence_encoder_text_embedder_Mul_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000095646382761f, .offset=
-35435}}},
        .rank= 3, .dimensions=dimensions__sentence_encoder_text_embedder_Mul_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_text_embedder_Mul_output_0_nfc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__sentence_encoder_text_embedder_Mul_output_0_nfc, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_text_embedder_Mul_output_0_nfc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_text_embedder_Mul_output_0_nfc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Registers static [1,1,1] constant (ONNX ConstantOfShape result), UFIXED_POINT_16 with the mask's encoding; payload from the binary blob. */
static ModelError_t addTensor__sentence_encoder_ConstantOfShape_1_output_0(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions__sentence_encoder_ConstantOfShape_1_output_0[] = {1, 1, 1};
  VALIDATE(model.addTensor("_sentence_encoder_ConstantOfShape_1_output_0", // Tensor Name
           (Qnn_Tensor_t) {
             .version= QNN_TENSOR_VERSION_2,
             {.v2= {
               .id=0, .name= "_sentence_encoder_ConstantOfShape_1_output_0", .type= QNN_TENSOR_TYPE_STATIC,
               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16,
               .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152590218931f, .offset= 0}}},
               .rank= 3, .dimensions=dimensions__sentence_encoder_ConstantOfShape_1_output_0,
               .memType= QNN_TENSORMEMTYPE_RAW,
               {.clientBuf= { .data=BINVARSTART(_sentence_encoder_ConstantOfShape_1_output_0),
                              .dataSize=BINLEN(_sentence_encoder_ConstantOfShape_1_output_0)}},
               .isDynamicDimensions= nullptr,
               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
               .isProduced= 0}}}
  ), err);
  return err;
}

/* Concat node: prepends the [1,1,1] constant to "text_mask" [1,128,1] along axis 1 -> [1,129,1] (mask extended for an extra position). */
static ModelError_t addNode__sentence_encoder_Concat_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_Concat_2 */
  Qnn_Param_t params__sentence_encoder_Concat_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
  };
  const char* inputs__sentence_encoder_Concat_2[] = {
    "_sentence_encoder_ConstantOfShape_1_output_0",
    "text_mask"
  };
  uint32_t dimensions__sentence_encoder_Concat_2_output_0[] = {1, 129, 1};
  Qnn_Tensor_t outputs__sentence_encoder_Concat_2[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_sentence_encoder_Concat_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152590218931f, .offset= 0}}},
        .rank= 3, .dimensions=dimensions__sentence_encoder_Concat_2_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_Concat_2", // Node Name
                         "qti.aisw", // Package Name
                         "Concat", // Qnn Node Type
                         params__sentence_encoder_Concat_2, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_Concat_2, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__sentence_encoder_Concat_2, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose node: extended mask [1,129,1] with perm {0,2,1} -> [1,1,129]. */
static ModelError_t addNode__sentence_encoder_Concat_2_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_Concat_2_output_0_ncf */
  uint32_t dimensions__sentence_encoder_Concat_2_output_0_ncf_perm[] = {3};
  uint32_t _sentence_encoder_Concat_2_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_Concat_2_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0, .name= "_sentence_encoder_Concat_2_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 1, .dimensions=dimensions__sentence_encoder_Concat_2_output_0_ncf_perm,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_sentence_encoder_Concat_2_output_0_ncf_perm, .dataSize=12}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_Concat_2_output_0_ncf[] = {
    "_sentence_encoder_Concat_2_output_0"
  };
  uint32_t dimensions__sentence_encoder_Concat_2_output_0_ncf[] = {1, 1, 129};
  Qnn_Tensor_t outputs__sentence_encoder_Concat_2_output_0_ncf[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_sentence_encoder_Concat_2_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152590218931f, .offset= 0}}},
        .rank= 3, .dimensions=dimensions__sentence_encoder_Concat_2_output_0_ncf,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_Concat_2_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node
Type
                         params__sentence_encoder_Concat_2_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_Concat_2_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_Concat_2_output_0_ncf, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Reshape node: flattens "style_dp_ncf" [1,8,16] -> [1,128]; no params. */
static ModelError_t addNode__predictor_Reshape_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _predictor_Reshape_1 */
  const char* inputs__predictor_Reshape_1[] = {
    "style_dp_ncf"
  };
  uint32_t dimensions__predictor_Reshape_1_output_0[] = {1, 128};
  Qnn_Tensor_t outputs__predictor_Reshape_1[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_predictor_Reshape_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000183746633411f, .offset= -34360}}},
        .rank= 2, .dimensions=dimensions__predictor_Reshape_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_predictor_Reshape_1", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__predictor_Reshape_1, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__predictor_Reshape_1, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* ONNX Unsqueeze lowered to Reshape: mask [1,1,129] -> [1,1,1,129] (row vector for the attention-mask outer product). */
static ModelError_t addNode__sentence_encoder_attn_encoder_Unsqueeze(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_Unsqueeze */
  const char* inputs__sentence_encoder_attn_encoder_Unsqueeze[] = {
    "_sentence_encoder_Concat_2_output_0_ncf"
  };
  uint32_t
dimensions__sentence_encoder_attn_encoder_Unsqueeze_output_0[] = {1, 1, 1, 129};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_Unsqueeze[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_sentence_encoder_attn_encoder_Unsqueeze_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152590218931f, .offset= 0}}},
        .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_Unsqueeze_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_attn_encoder_Unsqueeze", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__sentence_encoder_attn_encoder_Unsqueeze, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_attn_encoder_Unsqueeze, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* ONNX Unsqueeze lowered to Reshape: mask [1,1,129] -> [1,1,129,1] (column vector, pairs with the node above). */
static ModelError_t addNode__sentence_encoder_attn_encoder_Unsqueeze_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_Unsqueeze_1 */
  const char* inputs__sentence_encoder_attn_encoder_Unsqueeze_1[] = {
    "_sentence_encoder_Concat_2_output_0_ncf"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_Unsqueeze_1_output_0[] = {1, 1, 129, 1};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_Unsqueeze_1[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_sentence_encoder_attn_encoder_Unsqueeze_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152590218931f, .offset= 0}}},
        .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_Unsqueeze_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_attn_encoder_Unsqueeze_1", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__sentence_encoder_attn_encoder_Unsqueeze_1, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_attn_encoder_Unsqueeze_1, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* StridedSlice node: takes element [0:1, 0:1, 0:1] of the extended mask [1,129,1] -> [1,1,1].
 * "ranges" is a [3,3] int32 tensor of (begin, end, stride) per axis: 9 x int32 = 36 bytes; all masks 0. */
static ModelError_t addNode__sentence_encoder_Slice_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_Slice_2 */
  uint32_t dimensions__sentence_encoder_Slice_2_ranges[] = {3, 3};
  int32_t _sentence_encoder_Slice_2_ranges[] = {0, 1, 1, 0, 1, 1, 0, 1, 1};
  Qnn_Param_t params__sentence_encoder_Slice_2[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="ranges",
     {.tensorParam=(Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_2,
        {.v2= {
          .id=0, .name= "_sentence_encoder_Slice_2_ranges", .type= QNN_TENSOR_TYPE_STATIC,
          .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32,
          .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
          .rank= 2, .dimensions=dimensions__sentence_encoder_Slice_2_ranges,
          .memType= QNN_TENSORMEMTYPE_RAW,
          {.clientBuf= { .data=(uint8_t*)_sentence_encoder_Slice_2_ranges, .dataSize=36}},
          .isDynamicDimensions= nullptr,
          .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}},
          .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="begin_mask",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="end_mask",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="new_axes_mask",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="shrink_axes",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__sentence_encoder_Slice_2[] = {
    "_sentence_encoder_Concat_2_output_0"
  };
  uint32_t dimensions__sentence_encoder_Slice_2_output_0[] = {1, 1, 1};
  Qnn_Tensor_t outputs__sentence_encoder_Slice_2[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_sentence_encoder_Slice_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152590218931f, .offset= 0}}},
        .rank= 3, .dimensions=dimensions__sentence_encoder_Slice_2_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_Slice_2", // Node Name
                         "qti.aisw", // Package Name
                         "StridedSlice", // Qnn Node Type
                         params__sentence_encoder_Slice_2, // Node Params
                         5, // Num Node Params
                         inputs__sentence_encoder_Slice_2, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_Slice_2, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* ElementWiseBinary node (operation=13, same code as the embedder Mul above — confirm against QnnOpDef.h):
 * outer product of mask row [1,1,1,129] and mask column [1,1,129,1] via broadcast -> 2-D attention mask [1,1,129,129]. */
static ModelError_t addNode__sentence_encoder_attn_encoder_Mul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_Mul */
  Qnn_Param_t params__sentence_encoder_attn_encoder_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_Mul[] = {
    "_sentence_encoder_attn_encoder_Unsqueeze_output_0",
    "_sentence_encoder_attn_encoder_Unsqueeze_1_output_0"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_Mul_output_0[] = {1, 1, 129, 129};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_Mul[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0, .name= "_sentence_encoder_attn_encoder_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152590218931f, .offset= 0}}},
        .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_Mul_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_attn_encoder_Mul", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__sentence_encoder_attn_encoder_Mul, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_attn_encoder_Mul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__sentence_encoder_attn_encoder_Mul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Registers static [1] constant, UFIXED_POINT_16 with near-zero scale (1.52590e-09) — comparison operand for the Equal node that follows; payload from the binary blob. */
static ModelError_t addTensor__sentence_encoder_attn_encoder_attn_layers_0_Constant_87_output_0(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t
dimensions__sentence_encoder_attn_encoder_attn_layers_0_Constant_87_output_0[] = {1}; VALIDATE(model.addTensor("_sentence_encoder_attn_encoder_attn_layers_0_Constant_87_output_0", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Constant_87_output_0", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000015259022f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Constant_87_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_sentence_encoder_attn_encoder_attn_layers_0_Constant_87_output_0), .dataSize=BINLEN(_sentence_encoder_attn_encoder_attn_layers_0_Constant_87_output_0)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Equal(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Equal */ Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Equal[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Equal[] = { "_sentence_encoder_attn_encoder_Mul_output_0", "_sentence_encoder_attn_encoder_attn_layers_0_Constant_87_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Cast_5_output_0[] = {1, 1, 129, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Equal[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_sentence_encoder_attn_encoder_attn_layers_1_Cast_5_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_BOOL_8, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Cast_5_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Equal", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_Equal, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Equal, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Equal, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor__sentence_encoder_Expand_output_0(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__sentence_encoder_Expand_output_0[] = {1, 1, 64}; VALIDATE(model.addTensor("_sentence_encoder_Expand_output_0", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_Expand_output_0", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551614248252f, .offset= -29704}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_Expand_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=BINVARSTART(_sentence_encoder_Expand_output_0), .dataSize=BINLEN(_sentence_encoder_Expand_output_0)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_Concat_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_Concat_1 */ Qnn_Param_t params__sentence_encoder_Concat_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__sentence_encoder_Concat_1[] = { "_sentence_encoder_Expand_output_0", "_sentence_encoder_text_embedder_Mul_output_0_nfc" }; uint32_t dimensions__sentence_encoder_Concat_1_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_Concat_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_Concat_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551614248252f, .offset= -29704}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_Concat_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_Concat_1", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__sentence_encoder_Concat_1, // Node Params 1, // Num Node Params inputs__sentence_encoder_Concat_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_Concat_1, // Output Tensors 1// Num Output Tensors ), 
err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_0_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_Mul */ Qnn_Param_t params__sentence_encoder_convnext_convnext_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__sentence_encoder_convnext_convnext_0_Mul[] = { "_sentence_encoder_Concat_1_output_0", "_sentence_encoder_Concat_2_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_Mul_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551614248252f, .offset= -29704}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_0_Mul, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__sentence_encoder_convnext_convnext_0_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_dwconv_Pad */ uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _sentence_encoder_convnext_convnext_0_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__sentence_encoder_convnext_convnext_0_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__sentence_encoder_convnext_convnext_0_dwconv_Pad[] = { "_sentence_encoder_convnext_convnext_0_Mul_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0[] = {1, 133, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551614248252f, .offset= -29704}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__sentence_encoder_convnext_convnext_0_dwconv_Pad, // Node Params 2, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf */ uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf[] = { "_sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf[] = {1, 64, 133}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551614248252f, .offset= -29704}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf, // 
Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d */ const char* inputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d[] = { "_sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d[] = {1, 64, 1, 133}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551614248252f, .offset= -29704}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 133, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551614248252f, .offset= -29704}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_0_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_0_dwconv_weight[] = {1, 5, 1, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_0_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_0_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0048741023056209f, .offset= -107}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_0_dwconv_weight, 
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_0_dwconv_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_0_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_0_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_0_dwconv_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_0_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_0_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010023582726717f, .offset= -135}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_0_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_0_dwconv_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_0_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_dwconv_Conv_2d */ uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_dilation[] = {1, 1}; uint32_t 
dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_stride[] = {2}; uint32_t _sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_pad_amount, .dataSize=16}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d[] = { "_sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_0_dwconv_weight", "tts_dp_sentence_encoder_convnext_convnext_0_dwconv_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000497570144944f, .offset= -16450}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw[] = { "_sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000497570144944f, .offset= -16450}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static 
ModelError_t addNode__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate */
  // Reshape: squeezes the dummy height axis used to run the 1-D depthwise conv as a
  // Conv2d, [1, 64, 1, 129] (NCHW) -> [1, 64, 129]. Quant encoding carried over unchanged.
  const char* inputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate[] = {
      "_sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0[] = {1, 64, 129};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate[] = {
      (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            // 16-bit asymmetric activation encoding; same scale/offset as the reshape input.
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0000497570144944f, .offset= -16450}}},
            .rank= 3,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                "_sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate", // Node Name
                "qti.aisw", // Package Name
                "Reshape", // Qnn Node Type
                nullptr, // Node Params
                0, // Num Node Params
                inputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate, // Input Tensor Names
                1, // Num Input Tensor Names
                outputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate, // Output Tensors
                1// Num Output Tensors
  ), err);
  return err;
}

// Transpose: NCF [1, 64, 129] -> NFC [1, 129, 64] via perm {0, 2, 1}, putting features
// last ahead of the LayerNorm further down the graph.
static ModelError_t addNode__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc */
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {3};
  uint32_t _sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc[] = {
      // "perm" is a static rank-1 uint32 tensor; dataSize=12 bytes = 3 * sizeof(uint32_t).
      {.paramType=QNN_PARAMTYPE_TENSOR,
       .name="perm",
       {.tensorParam=(Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                               QNN_QUANTIZATION_ENCODING_UNDEFINED,
                               {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc_perm, .dataSize=12}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc[] = {
      "_sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc[] = {1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc[] = {
      (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0000497570144944f, .offset= -16450}}},
            .rank= 3,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                "_sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc", // Node Name
                "qti.aisw", // Package Name
                "Transpose", // Qnn Node Type
                params__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc, // Node Params
                1, // Num Node Params
                inputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc, // Input Tensor Names
                1, // Num Input Tensor Names
                outputs__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc, // Output Tensors
                1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseBinary with operation id 13 combining the NFC activations with
// _sentence_encoder_Concat_2_output_0 (produced elsewhere in the graph); the original
// node name suggests an elementwise multiply — confirm the id against QnnOpDef.h.
// Output [1, 129, 64] keeps the input activation encoding.
static ModelError_t addNode__sentence_encoder_convnext_convnext_0_Mul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_Mul_1 */
  Qnn_Param_t params__sentence_encoder_convnext_convnext_0_Mul_1[] = {
      {.paramType=QNN_PARAMTYPE_SCALAR,
       .name="operation",
       {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_0_Mul_1[] = {
      "_sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc",
      "_sentence_encoder_Concat_2_output_0"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_norm_Transpose_output_0[] = {1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_Mul_1[] = {
      (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_norm_Transpose_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding=
{.scale= 0.0000497570144944f, .offset= -16450}}},
            .rank= 3,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_norm_Transpose_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                "_sentence_encoder_convnext_convnext_0_Mul_1", // Node Name
                "qti.aisw", // Package Name
                "ElementWiseBinary", // Qnn Node Type
                params__sentence_encoder_convnext_convnext_0_Mul_1, // Node Params
                1, // Num Node Params
                inputs__sentence_encoder_convnext_convnext_0_Mul_1, // Input Tensor Names
                2, // Num Input Tensor Names
                outputs__sentence_encoder_convnext_convnext_0_Mul_1, // Output Tensors
                1// Num Output Tensors
  ), err);
  return err;
}

// Static LayerNorm scale (gamma) tensor: 64 elements, 8-bit quantized, data resolved
// from the linked model binary via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_weight[] = {64};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_weight", // Tensor Name
                (Qnn_Tensor_t) {
                    .version= QNN_TENSOR_VERSION_2,
                    {.v2= {
                      .id=0,
                      .name= "tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_weight",
                      .type= QNN_TENSOR_TYPE_STATIC,
                      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                      .quantizeParams= { QNN_DEFINITION_DEFINED,
                                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                         {.scaleOffsetEncoding= {.scale= 0.0036124915350229f, .offset= 0}}},
                      .rank= 1,
                      .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_weight,
                      .memType= QNN_TENSORMEMTYPE_RAW,
                      {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_weight),
                                     .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_weight)}},
                      .isDynamicDimensions= nullptr,
                      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                      .isProduced= 0}}}
  ), err);
  return err;
}

// Static LayerNorm shift (beta) tensor: 64 elements, 8-bit quantized, from the binary blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_bias[] = {64};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_bias", // Tensor Name
                (Qnn_Tensor_t) {
                    .version= QNN_TENSOR_VERSION_2,
                    {.v2= {
                      .id=0,
                      .name= "tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_bias",
                      .type= QNN_TENSOR_TYPE_STATIC,
                      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                      .quantizeParams= { QNN_DEFINITION_DEFINED,
                                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                         {.scaleOffsetEncoding= {.scale= 0.0014778855256736f, .offset= -108}}},
                      .rank= 1,
                      .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_bias,
                      .memType= QNN_TENSORMEMTYPE_RAW,
                      {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_bias),
                                     .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_bias)}},
                      .isDynamicDimensions= nullptr,
                      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                      .isProduced= 0}}}
  ), err);
  return err;
}

// LayerNorm over the last axis (axes={2}) of the [1, 129, 64] activations,
// epsilon=1e-6, with the gamma/beta tensors registered above. Output gets a
// fresh quant encoding (scale/offset differ from the input's).
static ModelError_t addNode__sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization */
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization_axes[] = {1};
  uint32_t _sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization_axes[] = {2};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization[] = {
      {.paramType=QNN_PARAMTYPE_TENSOR,
       .name="axes",
       {.tensorParam=(Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization_axes",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                               QNN_QUANTIZATION_ENCODING_UNDEFINED,
                               {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization_axes,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization_axes, .dataSize=4}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}},
      {.paramType=QNN_PARAMTYPE_SCALAR,
       .name="epsilon",
       {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization[] = {
      "_sentence_encoder_convnext_convnext_0_norm_Transpose_output_0",
      "tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_weight",
      "tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_bias"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0[] = {1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization[] = {
      (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0000759246177040f, .offset= -28396}}},
            .rank= 3,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions=
nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                "_sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization", // Node Name
                "qti.aisw", // Package Name
                "LayerNorm", // Qnn Node Type
                params__sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization, // Node Params
                2, // Num Node Params
                inputs__sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization, // Input Tensor Names
                3, // Num Input Tensor Names
                outputs__sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization, // Output Tensors
                1// Num Output Tensors
  ), err);
  return err;
}

// Transpose back: NFC [1, 129, 64] -> NCF [1, 64, 129] via perm {0, 2, 1} after the LayerNorm.
static ModelError_t addNode__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf */
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {3};
  uint32_t _sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf[] = {
      {.paramType=QNN_PARAMTYPE_TENSOR,
       .name="perm",
       {.tensorParam=(Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                               QNN_QUANTIZATION_ENCODING_UNDEFINED,
                               {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf[] = {
      "_sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf[] = {1, 64, 129};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf[] = {
      (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0000759246177040f, .offset= -28396}}},
            .rank= 3,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                "_sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf", // Node Name
                "qti.aisw", // Package Name
                "Transpose", // Qnn Node Type
                params__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf, // Node Params
                1, // Num Node Params
                inputs__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf, // Input Tensor Names
                1, // Num Input Tensor Names
                outputs__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf, // Output Tensors
                1// Num Output Tensors
  ), err);
  return err;
}

// Reshape: re-inserts the dummy height axis, [1, 64, 129] -> [1, 64, 1, 129], so the
// pointwise (pwconv1) 1-D conv can be lowered to Conv2d.
static ModelError_t addNode__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d */
  const char* inputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d[] = {
      "_sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d[] = {1, 64, 1, 129};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d[] = {
      (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0000759246177040f, .offset= -28396}}},
            .rank= 4,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d", // Node Name
                "qti.aisw", // Package Name
                "Reshape", // Qnn Node Type
                nullptr, // Node Params
                0, // Num Node Params
                inputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
                1, // Num Input Tensor Names
                outputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d, // Output Tensors
                1// Num Output Tensors
  ), err);
  return err;
}

// Transpose: NCHW [1, 64, 1, 129] -> NHWC [1, 1, 129, 64] via perm {0, 2, 3, 1}
// (QNN Conv2d consumes NHWC).
static ModelError_t addNode__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc */
  // "perm" tensor: 4 entries, dataSize=16 bytes = 4 * sizeof(uint32_t).
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
      {.paramType=QNN_PARAMTYPE_TENSOR,
       .name="perm",
       {.tensorParam=(Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                               QNN_QUANTIZATION_ENCODING_UNDEFINED,
                               {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
      "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc[] = {
      (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0000759246177040f, .offset= -28396}}},
            .rank= 4,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
                "qti.aisw", // Package Name
                "Transpose", // Qnn Node Type
                params__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
                1, // Num Node Params
                inputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                1, // Num Input Tensor Names
                outputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
                1// Num Output Tensors
  ), err);
  return err;
}

// Static pwconv1 weights: HWIO [1, 1, 64, 256] (spatially 1x1, 64 in / 256 out channels),
// 8-bit quantized, loaded from the model binary blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_weight[] = {1, 1, 64, 256};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_weight", // Tensor Name
                (Qnn_Tensor_t) {
                    .version= QNN_TENSOR_VERSION_2,
                    {.v2= {
                      .id=0,
                      .name= "tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_weight",
                      .type= QNN_TENSOR_TYPE_STATIC,
                      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                      .quantizeParams= { QNN_DEFINITION_DEFINED,
                                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                         {.scaleOffsetEncoding= {.scale= 0.0048165316693485f, .offset= -129}}},
                      .rank= 4,
                      .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_weight,
                      .memType= QNN_TENSORMEMTYPE_RAW,
                      {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_weight),
                                     .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_weight)}},
                      .isDynamicDimensions= nullptr,
                      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                      .isProduced= 0}}}
  ), err);
  return err;
}

// Static pwconv1 bias: 256 elements (one per output channel), 8-bit quantized,
// from the model binary blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_bias[] = {256};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_bias", // Tensor Name
                (Qnn_Tensor_t) {
                    .version= QNN_TENSOR_VERSION_2,
                    {.v2= {
                      .id=0,
                      .name= "tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_bias",
                      .type= QNN_TENSOR_TYPE_STATIC,
                      .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                      .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                      .quantizeParams= { QNN_DEFINITION_DEFINED,
                                         QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                         {.scaleOffsetEncoding= {.scale= 0.0021537514403462f, .offset= -233}}},
                      .rank= 1,
                      .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_bias,
                      .memType= QNN_TENSORMEMTYPE_RAW,
                      {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_bias),
                                     .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_bias)}},
                      .isDynamicDimensions= nullptr,
                      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                      .isProduced= 0}}}
  ), err);
  return err;
}

// Conv2d lowering of the pointwise pwconv1: stride 1, no padding, dilation 1, group 1;
// NHWC [1, 1, 129, 64] x weight [1, 1, 64, 256] -> [1, 1, 129, 256].
static ModelError_t addNode__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d */
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_dilation[] = {2};
  uint32_t _sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_dilation[] = {1, 1};
  // pad_amount is a 2x2 tensor: {{pad_top, pad_bottom}, {pad_left, pad_right}} — all zero here.
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t
dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_stride[] = {2};
  uint32_t _sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d[] = {
      // dilation {1, 1}: rank-1 tensor, dataSize=8 bytes = 2 * sizeof(uint32_t).
      {.paramType=QNN_PARAMTYPE_TENSOR,
       .name="dilation",
       {.tensorParam=(Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_dilation",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                               QNN_QUANTIZATION_ENCODING_UNDEFINED,
                               {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_dilation,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_dilation, .dataSize=8}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}},
      // pad_amount {{0,0},{0,0}}: rank-2 tensor, dataSize=16 bytes = 4 * sizeof(uint32_t).
      {.paramType=QNN_PARAMTYPE_TENSOR,
       .name="pad_amount",
       {.tensorParam=(Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_pad_amount",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                               QNN_QUANTIZATION_ENCODING_UNDEFINED,
                               {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 2,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_pad_amount,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_pad_amount, .dataSize=16}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}},
      // stride {1, 1}: rank-1 tensor, dataSize=8 bytes.
      {.paramType=QNN_PARAMTYPE_TENSOR,
       .name="stride",
       {.tensorParam=(Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_stride",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                               QNN_QUANTIZATION_ENCODING_UNDEFINED,
                               {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_stride,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d_stride, .dataSize=8}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}},
      {.paramType=QNN_PARAMTYPE_SCALAR,
       .name="group",
       {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
      {.paramType=QNN_PARAMTYPE_SCALAR,
       .name="reuse_sparse_indices",
       {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d[] = {
      "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc",
      "tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_weight",
      "tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_bias"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate[] = {1, 1, 129, 256};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d[] = {
      (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            // Fresh activation encoding for the conv output.
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0001251234352821f, .offset= -41578}}},
            .rank=
4,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d", // Node Name
                "qti.aisw", // Package Name
                "Conv2d", // Qnn Node Type
                params__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d, // Node Params
                5, // Num Node Params
                inputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d, // Input Tensor Names
                3, // Num Input Tensor Names
                outputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d, // Output Tensors
                1// Num Output Tensors
  ), err);
  return err;
}

// Transpose: NHWC [1, 1, 129, 256] -> NCHW [1, 256, 1, 129] via perm {0, 3, 1, 2}
// after the pwconv1 Conv2d.
static ModelError_t addNode__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw */
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
      {.paramType=QNN_PARAMTYPE_TENSOR,
       .name="perm",
       {.tensorParam=(Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UINT_32,
            .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                               QNN_QUANTIZATION_ENCODING_UNDEFINED,
                               {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm,
            .memType=
QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
      "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw[] = {1, 256, 1, 129};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw[] = {
      (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0001251234352821f, .offset= -41578}}},
            .rank= 4,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw", // Node Name
                "qti.aisw", // Package Name
                "Transpose", // Qnn Node Type
                params__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw, // Node Params
                1, // Num Node Params
                inputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
                1, // Num Input Tensor Names
                outputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw, // Output Tensors
                1// Num
Output Tensors
  ), err);
  return err;
}

// Reshape: squeezes the dummy height axis again, [1, 256, 1, 129] -> [1, 256, 129],
// yielding the pwconv1 output in NCF layout.
static ModelError_t addNode__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate */
  const char* inputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate[] = {
      "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_output_0[] = {1, 256, 129};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate[] = {
      (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0001251234352821f, .offset= -41578}}},
            .rank= 3,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv1_Conv_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate", // Node Name
                "qti.aisw", // Package Name
                "Reshape", // Qnn Node Type
                nullptr, // Node Params
                0, // Num Node Params
                inputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate, // Input Tensor Names
                1, // Num Input Tensor Names
                outputs__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate, // Output Tensors
                1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseNeuron with operation id 1 on the pwconv1 output [1, 256, 129]; this replaces
// the ConvNeXt activation block (original ONNX node ..._act_Mul_1) — the exact activation
// the id maps to should be confirmed against QnnOpDef.h. Output gets a fresh quant encoding.
static ModelError_t addNode__elementwiseneuron_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _elementwiseneuron_2 */
  Qnn_Param_t params__elementwiseneuron_2[] = {
      {.paramType=QNN_PARAMTYPE_SCALAR,
       .name="operation",
       {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
  };
  const char* inputs__elementwiseneuron_2[] = {
      "_sentence_encoder_convnext_convnext_0_pwconv1_Conv_output_0"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_0_act_Mul_1_output_0[] = {1, 256, 129};
  Qnn_Tensor_t outputs__elementwiseneuron_2[] = {
      (Qnn_Tensor_t) {
          .version= QNN_TENSOR_VERSION_2,
          {.v2= {
            .id=0,
            .name= "_sentence_encoder_convnext_convnext_0_act_Mul_1_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0000482715731778f, .offset= -3521}}},
            .rank= 3,
            .dimensions=dimensions__sentence_encoder_convnext_convnext_0_act_Mul_1_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}},
            .isDynamicDimensions= nullptr,
            .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
            .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                "_elementwiseneuron_2", // Node Name
                "qti.aisw", // Package Name
                "ElementWiseNeuron", // Qnn Node Type
                params__elementwiseneuron_2, // Node Params
                1, // Num Node Params
                inputs__elementwiseneuron_2, // Input Tensor Names
                1, // Num Input Tensor Names
                outputs__elementwiseneuron_2, // Output Tensors
                1// Num Output Tensors
  ), err);
  return err;
}

// Reshape: [1, 256, 129] -> [1, 256, 1, 129], lifting the activation output to 4-D
// for the pwconv2 Conv2d lowering (continues past this chunk).
static ModelError_t addNode__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d */
  const char* inputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d[] = {
      "_sentence_encoder_convnext_convnext_0_act_Mul_1_output_0"
  };
  uint32_t
dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d[] = {1, 256, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000482715731778f, .offset= -3521}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { 
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 256}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000482715731778f, .offset= -3521}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_weight[] = {1, 1, 256, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0034599537029862f, .offset= -125}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_bias(QnnModel& model){ ModelError_t err 
= MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007965331897140f, .offset= -102}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d */ uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_stride[] = {2}; uint32_t _sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { 
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d[] = { "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_weight", "tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000859733336256f, .offset= -33587}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 
0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
inputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw[] = { "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000859733336256f, .offset= -33587}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate */ const char* 
inputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate[] = { "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0[] = {1, 64, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000859733336256f, .offset= -33587}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t 
_sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc[] = { "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000859733336256f, .offset= -33587}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, 
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_0_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_0_gamma[] = {1, 1, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_0_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_0_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000020135173600f, .offset= -1017}}}, .rank= 3, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_0_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_0_gamma), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_0_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_0_Mul_2(QnnModel& model){ 
ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_Mul_2 */ Qnn_Param_t params__sentence_encoder_convnext_convnext_0_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__sentence_encoder_convnext_convnext_0_Mul_2[] = { "tts_dp_sentence_encoder_convnext_convnext_0_gamma", "_sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_Mul_2_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000093241815193f, .offset= -27267}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_0_Mul_2, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_0_Add(QnnModel& model){ ModelError_t err = 
MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_0_Add */ Qnn_Param_t params__sentence_encoder_convnext_convnext_0_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__sentence_encoder_convnext_convnext_0_Add[] = { "_sentence_encoder_convnext_convnext_0_Mul_output_0", "_sentence_encoder_convnext_convnext_0_Mul_2_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_Add_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551619268663f, .offset= -29704}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_0_Add, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_0_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_sentence_encoder_convnext_convnext_0_Mul_3 */ Qnn_Param_t params__sentence_encoder_convnext_convnext_0_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__sentence_encoder_convnext_convnext_0_Mul_3[] = { "_sentence_encoder_convnext_convnext_0_Add_output_0", "_sentence_encoder_Concat_2_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_0_Mul_3_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_0_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_0_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551619268663f, .offset= -29704}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_0_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_0_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_0_Mul_3, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_0_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_0_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_1_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_Mul */ 
Qnn_Param_t params__sentence_encoder_convnext_convnext_1_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__sentence_encoder_convnext_convnext_1_Mul[] = { "_sentence_encoder_convnext_convnext_0_Mul_3_output_0", "_sentence_encoder_Concat_2_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_Mul_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551619268663f, .offset= -29704}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_1_Mul, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_1_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_dwconv_Pad */ uint32_t 
dimensions__sentence_encoder_convnext_convnext_1_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _sentence_encoder_convnext_convnext_1_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__sentence_encoder_convnext_convnext_1_dwconv_Pad[] = { "_sentence_encoder_convnext_convnext_1_Mul_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0[] = {1, 133, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551619268663f, .offset= -29704}}}, .rank= 3, 
// --- tail of addNode__sentence_encoder_convnext_convnext_1_dwconv_Pad (opened above): finishes the
// padded-output tensor descriptor and registers the Pad node (2 params: pad_amount tensor + scheme
// scalar) with the graph wrapper. VALIDATE aborts with err on any addNode failure. ---
.dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_1_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__sentence_encoder_convnext_convnext_1_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__sentence_encoder_convnext_convnext_1_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_1_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Registers a Transpose node that flips the padded activation from feature-last (1, 133, 64) to
// channel-first (1, 64, 133) layout via perm = {0, 2, 1} (3 x uint32 => dataSize 12). Transpose is
// data-invariant, so the input's 16-bit scale/offset encoding is carried over to the output unchanged.
static ModelError_t addNode__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf[] = { "_sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf[] = {1, 64, 133};
// Output is a NATIVE (graph-internal) 16-bit asymmetric-quantized tensor; no client buffer is attached.
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551619268663f, .offset= -29704}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err); return err; }
static ModelError_t
// Inserts a parameterless Reshape that lifts the channel-first activation (1, 64, 133) to a 4-D
// (1, 64, 1, 133) tensor so the 1-D depthwise convolution can be expressed as a 2-D conv with
// height 1. Reshape is data-invariant: quantization encoding is propagated unchanged.
addNode__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d */ const char* inputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d[] = { "_sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d[] = {1, 64, 1, 133}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551619268663f, .offset= -29704}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Registers a Transpose from NCHW (1, 64, 1, 133) to NHWC (1, 1, 133, 64) via perm = {0, 2, 3, 1}
// (4 x uint32 => dataSize 16), since QNN conv ops consume channel-last activations.
static ModelError_t addNode__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 133, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551619268663f, .offset= -29704}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Adds the static 8-bit quantized depthwise filter (1, 5, 1, 64): kernel width 5, 64 channels.
// Weight bytes live in a separate binary blob addressed via the BINVARSTART/BINLEN linker symbols.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_1_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_1_dwconv_weight[] = {1, 5, 1, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_1_dwconv_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_1_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050047575496137f, .offset= -132}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_1_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_1_dwconv_weight),
.dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_1_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
// Adds the static 8-bit quantized per-channel bias vector (64 entries) for the depthwise conv,
// likewise sourced from the external binary blob via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_1_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_1_dwconv_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_1_dwconv_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_1_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009108853409998f, .offset= -109}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_1_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_1_dwconv_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_1_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
// Registers the DepthWiseConv2d node: dilation {1,1}, no padding (padding was applied by the
// explicit Pad node upstream), stride {1,1}. Input NHWC (1, 1, 133, 64) with kernel width 5 and
// VALID padding yields the (1, 1, 129, 64) intermediate; inputs are activation, weight, bias.
static ModelError_t addNode__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_dwconv_Conv_2d */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t _sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_stride[] = {2}; uint32_t _sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d[] = { "_sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_1_dwconv_weight", "tts_dp_sentence_encoder_convnext_convnext_1_dwconv_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000423072451667f, .offset= -25210}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_1_dwconv_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d, // Node Params
3, // Num Node Params
inputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Registers a Transpose from NHWC (1, 1, 129, 64) back to NCHW (1, 64, 1, 129) via
// perm = {0, 3, 1, 2}, undoing the channel-last layout used for the conv; the conv output's
// quantization encoding is propagated unchanged.
static ModelError_t addNode__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw[] = { "_sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000423072451667f, .offset= -25210}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Registers the parameterless Reshape that drops the dummy height axis, collapsing
// (1, 64, 1, 129) back to the 3-D channel-first activation (1, 64, 129).
static ModelError_t addNode__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate */ const char* inputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate[] = { "_sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0[] = {1, 64, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000423072451667f, .offset= -25210}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Registers a Transpose from channel-first (1, 64, 129) to feature-last (1, 129, 64) layout
// via perm = {0, 2, 1}, preparing the conv output for the feature-wise ops that follow.
static ModelError_t addNode__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t
_sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc[] = { "_sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000423072451667f, .offset= -25210}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Registers an ElementWiseBinary node (operation=13; per the converter this is the ONNX Mul here)
// combining the conv output with "_sentence_encoder_Concat_2_output_0". The output tensor is named
// after the downstream norm Transpose, so the converter evidently folded that transpose away.
static ModelError_t addNode__sentence_encoder_convnext_convnext_1_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_Mul_1 */ Qnn_Param_t params__sentence_encoder_convnext_convnext_1_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__sentence_encoder_convnext_convnext_1_Mul_1[] = { "_sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc", "_sentence_encoder_Concat_2_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_norm_Transpose_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000423072451667f, .offset= -25210}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_1_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_convnext_convnext_1_Mul_1, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_1_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_1_Mul_1, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Adds the static 8-bit quantized LayerNorm gamma (scale) vector, 64 entries, from the binary blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_weight[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0035840258933604f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
static ModelError_t
// Adds the static 8-bit quantized LayerNorm beta (shift) vector, 64 entries, from the binary blob.
addTensor_tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0013857837766409f, .offset= -137}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
// Registers the LayerNorm node: normalizes over axis 2 (the 64-wide feature axis of the
// (1, 129, 64) activation) with epsilon 1e-6, then applies the gamma/beta tensors above.
// Inputs: activation, weight (gamma), bias (beta); output gets a fresh 16-bit encoding.
static ModelError_t addNode__sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
const char* inputs__sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization[] = { "_sentence_encoder_convnext_convnext_1_norm_Transpose_output_0", "tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_weight", "tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000767392848502f, .offset= -26750}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Registers a Transpose from feature-last (1, 129, 64) to channel-first (1, 64, 129) via
// perm = {0, 2, 1}, feeding the pointwise conv that follows.
static ModelError_t addNode__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf[] = { "_sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf[] = {1, 64, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000767392848502f, .offset= -26750}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Registers the parameterless Reshape that lifts the normalized activation (1, 64, 129) to 4-D
// (1, 64, 1, 129) so pwconv1 (a 1x1 conv) can run as a 2-D convolution, mirroring the dwconv path.
static ModelError_t addNode__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d */ const char* inputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d[] = { "_sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000767392848502f, .offset= -26750}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
// (continues below) Transpose NCHW -> NHWC for the pwconv1 input, same pattern as the dwconv path.
static ModelError_t addNode__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t
_sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000767392848502f, .offset= -26750}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_weight[] = {1, 1, 64, 256}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0054039629176259f, .offset= -129}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_bias[] = {256}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021973026450723f, .offset= -234}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_stride[] = {2}; uint32_t 
_sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d[] = { "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_weight", "tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate[] = {1, 1, 129, 256}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001037092588376f, .offset= -50925}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate, 
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw[] = { "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw[] = {1, 256, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001037092588376f, .offset= -50925}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } 
static ModelError_t addNode__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate */ const char* inputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate[] = { "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_output_0[] = {1, 256, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001037092588376f, .offset= -50925}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__elementwiseneuron_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _elementwiseneuron_4 */ Qnn_Param_t 
params__elementwiseneuron_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__elementwiseneuron_4[] = { "_sentence_encoder_convnext_convnext_1_pwconv1_Conv_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_act_Mul_1_output_0[] = {1, 256, 129}; Qnn_Tensor_t outputs__elementwiseneuron_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000242150781560f, .offset= -7019}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_elementwiseneuron_4", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__elementwiseneuron_4, // Node Params 1, // Num Node Params inputs__elementwiseneuron_4, // Input Tensor Names 1, // Num Input Tensor Names outputs__elementwiseneuron_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d */ const char* inputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d[] = { "_sentence_encoder_convnext_convnext_1_act_Mul_1_output_0" }; uint32_t 
dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d[] = {1, 256, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000242150781560f, .offset= -7019}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { 
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 256}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000242150781560f, .offset= -7019}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_weight[] = {1, 1, 256, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0034828952047974f, .offset= -128}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_bias(QnnModel& model){ ModelError_t err 
= MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010753445094451f, .offset= -133}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_stride[] = {2}; uint32_t _sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { 
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
// ---------------------------------------------------------------------------
// Auto-generated QNN graph-builder code (qnn-onnx-converter). Do not hand-edit
// the tensor/param literals: dims, dataSize and rank fields must stay mutually
// consistent (dataSize = element_count * sizeof(uint32_t) for uint32 params).
// ---------------------------------------------------------------------------
// Tail of addNode__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d:
// finishes the "stride" tensor param ({1,1}; 2 x uint32 -> dataSize=8), adds
// scalar params "group"=1 and "reuse_sparse_indices"=false, lists the 3 inputs
// (NHWC activation, weight, bias) and the single output tensor {1,1,129,64},
// 16-bit unsigned fixed point with asymmetric scale/offset encoding.
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d[] = { "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_weight", "tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000476628883916f, .offset= -30446}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 
// Registers the Conv2d node (5 params, 3 inputs, 1 output), then begins
// addNode__…_pwconv2_Conv_intermediate_nchw: a Transpose with perm {0,3,1,2}
// (NHWC -> NCHW), perm stored as a static uint32 tensor param
// (4 x uint32 -> dataSize=16).
0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
// The Transpose consumes the Conv2d intermediate and emits a {1,64,1,129} u16
// tensor with the same scale/offset encoding (layout-only change, values
// untouched).
inputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw[] = { "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000476628883916f, .offset= -30446}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate */ const char* 
// Body of addNode__…_pwconv2_Conv_intermediate: Reshape {1,64,1,129} ->
// {1,64,129}, squeezing the dummy height axis that was inserted so the 1-D
// conv could run as a 2-D op; Reshape takes no params (nullptr, 0).
inputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate[] = { "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0[] = {1, 64, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000476628883916f, .offset= -30446}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t 
// addNode__…_Conv_output_0_nfc: Transpose with perm {0,2,1}
// (3 x uint32 -> dataSize=12), i.e. (N,C,F) -> (N,F,C); output {1,129,64} u16
// with the same quantization encoding as its input.
_sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc[] = { "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000476628883916f, .offset= -30446}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, 
// Registers the Transpose node, then addTensor for the static ConvNeXt-1
// "gamma" scale weight {1,1,64} (u16 quantized), payload resolved from the
// linked binary blob via BINVARSTART/BINLEN; finally begins
// addNode__sentence_encoder_convnext_convnext_1_Mul_2.
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_1_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_1_gamma[] = {1, 1, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_1_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_1_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000019501467250f, .offset= -1161}}}, .rank= 3, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_1_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_1_gamma), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_1_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_1_Mul_2(QnnModel& model){ 
// ElementWiseBinary "Mul_2": scalar param operation=13 — presumably the
// multiply opcode (node derives from ONNX Mul; confirm against
// QNN_OP_ELEMENT_WISE_BINARY_OPERATION_* in QnnOpDef.h). Computes
// gamma * pwconv2 output; output {1,129,64} u16.
ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_Mul_2 */ Qnn_Param_t params__sentence_encoder_convnext_convnext_1_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__sentence_encoder_convnext_convnext_1_Mul_2[] = { "tts_dp_sentence_encoder_convnext_convnext_1_gamma", "_sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_Mul_2_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000045732604121f, .offset= -30682}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_1_Mul_2, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_1_Add(QnnModel& model){ ModelError_t err = 
// ElementWiseBinary "Add": operation=0 — presumably add (node derives from
// ONNX Add). Residual sum of Mul_output_0 and Mul_2_output_0; output
// {1,129,64} u16. Its encoding (scale/offset) is reused verbatim by the
// Mul_3 and convnext_2_Mul outputs below.
MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_1_Add */ Qnn_Param_t params__sentence_encoder_convnext_convnext_1_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__sentence_encoder_convnext_convnext_1_Add[] = { "_sentence_encoder_convnext_convnext_1_Mul_output_0", "_sentence_encoder_convnext_convnext_1_Mul_2_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_Add_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551625162188f, .offset= -29704}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_1_Add, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_1_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_sentence_encoder_convnext_convnext_1_Mul_3 */ Qnn_Param_t params__sentence_encoder_convnext_convnext_1_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__sentence_encoder_convnext_convnext_1_Mul_3[] = { "_sentence_encoder_convnext_convnext_1_Add_output_0", "_sentence_encoder_Concat_2_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_1_Mul_3_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_1_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_1_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551625162188f, .offset= -29704}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_1_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_1_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_1_Mul_3, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_1_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_1_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_Mul(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_Mul */ 
// "Mul_3" (above) and "convnext_2_Mul" (below) both multiply by
// _sentence_encoder_Concat_2_output_0 — looks like a broadcast sequence/text
// mask applied at the block boundary (TODO confirm against the producer of
// Concat_2). convnext_2_Mul feeds the ConvNeXt-2 depthwise-conv Pad node.
Qnn_Param_t params__sentence_encoder_convnext_convnext_2_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__sentence_encoder_convnext_convnext_2_Mul[] = { "_sentence_encoder_convnext_convnext_1_Mul_3_output_0", "_sentence_encoder_Concat_2_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_Mul_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551625162188f, .offset= -29704}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_2_Mul, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_Mul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_dwconv_Pad(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_dwconv_Pad */ uint32_t 
// Pad ahead of the ConvNeXt-2 depthwise conv: pad_amount is a {3,2} uint32
// tensor {0,0, 2,2, 0,0} — 2 elements before and after the sequence axis only
// (129 -> 133, matching the {1,133,64} output below); 6 x uint32 ->
// dataSize=24. scalar "scheme"=3 selects the pad mode (see the
// QNN_OP_PAD_SCHEME_* values in QnnOpDef.h — TODO confirm which mode 3 is).
dimensions__sentence_encoder_convnext_convnext_2_dwconv_Pad_pad_amount[] = {3, 2}; uint32_t _sentence_encoder_convnext_convnext_2_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0}; Qnn_Param_t params__sentence_encoder_convnext_convnext_2_dwconv_Pad[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__sentence_encoder_convnext_convnext_2_dwconv_Pad[] = { "_sentence_encoder_convnext_convnext_2_Mul_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0[] = {1, 133, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551625162188f, .offset= -29704}}}, .rank= 3, 
// Registers the Pad node (2 params), then addNode__…_Pad_output_0_ncf:
// a Transpose with perm {0,2,1} taking the padded activation back to (N,C,F).
.dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_dwconv_Pad", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__sentence_encoder_convnext_convnext_2_dwconv_Pad, // Node Params 2, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_dwconv_Pad, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_dwconv_Pad, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf */ uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {3}; uint32_t _sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
// Transpose output is {1,64,133} u16; the node is registered and the next
// function begins.
.data=(uint8_t*)_sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf[] = { "_sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf[] = {1, 64, 133}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551625162188f, .offset= -29704}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
// addNode__…_dwconv_Conv_reshape_to_2d: Reshape {1,64,133} -> {1,64,1,133},
// re-inserting a dummy height axis so the 1-D depthwise conv can run as
// DepthWiseConv2d; Reshape takes no params (nullptr, 0).
addNode__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d */ const char* inputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d[] = { "_sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d[] = {1, 64, 1, 133}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551625162188f, .offset= -29704}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 133, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= 
// (continuation) Transpose perm {0,2,3,1} (NCHW -> NHWC; 4 x uint32 ->
// dataSize=16) producing the {1,1,133,64} u16 activation consumed by the
// depthwise conv below. Then addTensor for the static depthwise weight
// {1,5,1,64} (u8 quantized scale/offset), data via BINVARSTART/BINLEN.
{.scale= 0.0000551625162188f, .offset= -29704}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_2_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_2_dwconv_weight[] = {1, 5, 1, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_2_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_2_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0052623907104135f, .offset= -130}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_2_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_2_dwconv_weight), 
// addTensor for the dwconv bias {64} (u8 quantized, data via
// BINVARSTART/BINLEN), then the DepthWiseConv2d node begins: its
// dilation/pad_amount/stride tensor params are declared next.
.dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_2_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_2_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_2_dwconv_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_2_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_2_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0008560911519453f, .offset= -124}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_2_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_2_dwconv_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_2_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_dwconv_Conv_2d */ uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t 
// DepthWiseConv2d params: dilation {1,1}, pad_amount {0,0,0,0} (padding was
// already applied by the explicit Pad node earlier), stride {1,1} — each a
// static uint32 tensor (dataSize = count * 4: 8, 16, 8 respectively).
_sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_stride[] = {2}; uint32_t _sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= 
// Finishes the "stride" param; inputs are (padded NHWC activation, weight,
// bias); output {1,1,129,64} u16 — consistent with a width-5 kernel over the
// 133-wide padded input with zero conv padding (133 - 5 + 1 = 129).
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d[] = { "_sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_2_dwconv_weight", "tts_dp_sentence_encoder_convnext_convnext_2_dwconv_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000659081488266f, .offset= -34449}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, 
// Registers the DepthWiseConv2d node (3 params, 3 inputs, 1 output), then
// begins the NHWC -> NCHW Transpose (perm {0,3,1,2}, 4 x uint32 ->
// dataSize=16) of the conv intermediate.
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { 
// Transpose output is {1,64,1,129} u16; the final Reshape function that
// starts at the end of this line is truncated in this fragment.
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw[] = { "_sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000659081488266f, .offset= -34449}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate */ const char* inputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate[] = { "_sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0[] = {1, 64, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000659081488266f, .offset= -34449}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc */ uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t 
_sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc[] = { "_sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000659081488266f, .offset= -34449}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= 
nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_Mul_1 */ Qnn_Param_t params__sentence_encoder_convnext_convnext_2_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__sentence_encoder_convnext_convnext_2_Mul_1[] = { "_sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc", "_sentence_encoder_Concat_2_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_norm_Transpose_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000659081488266f, .offset= -34449}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_2_Mul_1, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_weight[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036789409350604f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t 
addTensor_tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0014227550709620f, .offset= -120}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization */ uint32_t dimensions__sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, 
.dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization[] = { "_sentence_encoder_convnext_convnext_2_norm_Transpose_output_0", "tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_weight", "tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000906725981622f, .offset= -40663}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, 
.isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf[] = { "_sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf[] = {1, 64, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000906725981622f, .offset= -40663}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d */ const char* 
inputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d[] = { "_sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000906725981622f, .offset= -40663}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t 
_sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000906725981622f, .offset= -40663}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_weight[] = {1, 1, 64, 256}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0045686503872275f, .offset= -127}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_bias[] = {256}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0020407955162227f, .offset= -234}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d */ uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_stride[] = {2}; uint32_t 
_sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d[] = { "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_weight", "tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate[] = {1, 1, 129, 256}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000938654266065f, .offset= -42858}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate, 
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw[] = { "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw[] = {1, 256, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000938654266065f, .offset= -42858}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } 
/* [review] Auto-generated by qnn-onnx-converter (see file header) -- prefer regenerating the
   model over hand-editing. Each addNode_* helper declares the node's output tensor(s) and
   registers the op on `model`; VALIDATE returns the converted error code on failure. */
/* Reshape: collapses the NCHW conv output {1, 256, 1, 129} (unit H axis) to rank-3
   {1, 256, 129}. Output keeps the producer's quant encoding (same scale/offset), so this is
   layout-only. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate */
const char* inputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate[] = { "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw" };
uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_output_0[] = {1, 256, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000938654266065f, .offset= -42858}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseNeuron over the pwconv1 output {1, 256, 129}. Scalar param "operation"=1 selects
   the activation kind -- enum value defined in QnnOpDef.h; confirm against the SDK headers. */
static ModelError_t addNode__elementwiseneuron_6(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_6 */
Qnn_Param_t
params__elementwiseneuron_6[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} };
const char* inputs__elementwiseneuron_6[] = { "_sentence_encoder_convnext_convnext_2_pwconv1_Conv_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_2_act_Mul_1_output_0[] = {1, 256, 129};
Qnn_Tensor_t outputs__elementwiseneuron_6[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000345328371623f, .offset= -4922}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_6", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_6, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_6, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_6, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Reshape {1, 256, 129} -> rank-4 {1, 256, 1, 129}: re-inserts a unit H axis so the next
   pointwise conv can run as Conv2d. (Definition continues on the next generated line.) */
static ModelError_t addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d */
const char* inputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d[] = { "_sentence_encoder_convnext_convnext_2_act_Mul_1_output_0" };
uint32_t
/* [review] Continuation of addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d
   (declared on the previous generated line). */
dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d[] = {1, 256, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000345328371623f, .offset= -4922}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose perm {0, 2, 3, 1}: NCHW -> NHWC, {1, 256, 1, 129} -> {1, 1, 129, 256}. perm is a
   static rank-1 uint32 tensor (4 elements, dataSize 16 bytes). */
static ModelError_t addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d" };
uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 256};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000345328371623f, .offset= -4922}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Static pwconv2 weight {1, 1, 256, 64}, UFIXED_POINT_8 with per-tensor scale/offset; bytes
   come from the linked weight blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_weight[] = {1, 1, 256, 64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0031105158850551f, .offset= -121}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* Static pwconv2 bias {64}, UFIXED_POINT_8 (bias_bitwidth=8 per the converter command line in
   the file header). Definition continues on the next generated line. */
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_bias(QnnModel& model){
ModelError_t err
/* [review] Continuation of addTensor_tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_bias. */
= MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_bias[] = {64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012834523804486f, .offset= -111}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* 1x1 Conv2d ("pwconv2"): NHWC input {1,1,129,256} with weight {1,1,256,64} + bias {64} ->
   {1,1,129,64}. stride {1,1}, dilation {1,1}, pad {0,0,0,0}, group=1; output re-quantized to
   UFIXED_POINT_16 (scale 6.87882e-05, offset -28358). Each tensor param below is a static
   rank-1/2 uint32 tensor whose clientBuf points at the matching stack array. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d */
uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_dilation[] = {2};
uint32_t _sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_stride[] = {2};
uint32_t _sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= {
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d[] = { "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_weight", "tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_bias" };
uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate[] = {1, 1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000687881620252f, .offset= -28358}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced=
0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose perm {0, 3, 1, 2}: NHWC -> NCHW, {1,1,129,64} -> {1,64,1,129}. Definition
   continues on the next generated line. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw */
uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char*
/* [review] Continuation of addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw. */
inputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw[] = { "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate" };
uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000687881620252f, .offset= -28358}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Reshape: drop the unit H axis, {1, 64, 1, 129} -> {1, 64, 129}. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate */
const char*
inputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate[] = { "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw" };
uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0[] = {1, 64, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000687881620252f, .offset= -28358}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose perm {0, 2, 1}: {1, 64, 129} -> {1, 129, 64}, channels last for the elementwise
   ops that follow. perm tensor: 3 x uint32, dataSize 12. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc */
uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {3};
uint32_t
_sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc[] = { "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000687881620252f, .offset= -28358}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Static "gamma" {1, 1, 64}, UFIXED_POINT_16 -- presumably the ConvNeXt layer-scale vector
   (name suggests it; confirm against the source model). Broadcast-multiplied by Mul_2 below. */
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_2_gamma(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_2_gamma[] = {1, 1, 64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_2_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_2_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000026676127618f, .offset= -2030}}}, .rank= 3, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_2_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_2_gamma), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_2_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}
/* ElementWiseBinary (operation=13; node name says Mul): gamma {1,1,64} broadcast * pwconv2
   output {1,129,64}. Definition continues on the next generated line. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_2_Mul_2(QnnModel& model){
/* [review] Continuation of addNode__sentence_encoder_convnext_convnext_2_Mul_2. Output
   {1, 129, 64}, UFIXED_POINT_16 with its own calibrated scale/offset. */
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_Mul_2 */
Qnn_Param_t params__sentence_encoder_convnext_convnext_2_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__sentence_encoder_convnext_convnext_2_Mul_2[] = { "tts_dp_sentence_encoder_convnext_convnext_2_gamma", "_sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc" };
uint32_t dimensions__sentence_encoder_convnext_convnext_2_Mul_2_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000106483248601f, .offset= -31034}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_2_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_convnext_convnext_2_Mul_2, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_2_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_2_Mul_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseBinary (operation=0; node name says Add): adds _..._Mul_output_0 (produced
   earlier in the file, outside this chunk) to the gamma-scaled branch -- the block's residual
   connection, judging by the graph shape. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_2_Add(QnnModel& model){
ModelError_t err =
MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_2_Add */
Qnn_Param_t params__sentence_encoder_convnext_convnext_2_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__sentence_encoder_convnext_convnext_2_Add[] = { "_sentence_encoder_convnext_convnext_2_Mul_output_0", "_sentence_encoder_convnext_convnext_2_Mul_2_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_2_Add_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551753073523f, .offset= -29697}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_2_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_convnext_convnext_2_Add, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_2_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_2_Add, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* ElementWiseBinary (operation=13 / Mul): multiplies the Add output by
   _sentence_encoder_Concat_2_output_0 (presumably the sequence mask, given the text_mask
   model input -- confirm); output reuses the Add tensor's scale/offset. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_2_Mul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR
_sentence_encoder_convnext_convnext_2_Mul_3 */
Qnn_Param_t params__sentence_encoder_convnext_convnext_2_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__sentence_encoder_convnext_convnext_2_Mul_3[] = { "_sentence_encoder_convnext_convnext_2_Add_output_0", "_sentence_encoder_Concat_2_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_2_Mul_3_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_2_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_2_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551753073523f, .offset= -29697}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_2_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_2_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_convnext_convnext_2_Mul_3, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_2_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_2_Mul_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Same mask-style multiply (operation=13) entering ConvNeXt block 3. Definition continues on
   the next generated line. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_Mul */
/* [review] Continuation of addNode__sentence_encoder_convnext_convnext_3_Mul. */
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__sentence_encoder_convnext_convnext_3_Mul[] = { "_sentence_encoder_convnext_convnext_2_Mul_3_output_0", "_sentence_encoder_Concat_2_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_3_Mul_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551753073523f, .offset= -29697}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_Mul, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Pad ahead of block 3's depthwise conv: pad_amount rows {0,0},{2,2},{0,0} pad the sequence
   axis by 2 on each side, {1,129,64} -> {1,133,64}. "scheme"=3 selects the pad mode -- see the
   pad-scheme enum in QnnOpDef.h to confirm which mode that is. Output keeps the input's
   scale/offset. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_dwconv_Pad */
uint32_t
dimensions__sentence_encoder_convnext_convnext_3_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _sentence_encoder_convnext_convnext_3_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0};
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_dwconv_Pad[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} };
const char* inputs__sentence_encoder_convnext_convnext_3_dwconv_Pad[] = { "_sentence_encoder_convnext_convnext_3_Mul_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0[] = {1, 133, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_dwconv_Pad[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551753073523f, .offset= -29697}}}, .rank= 3,
.dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
/* Transpose perm {0, 2, 1} back to channel-first for the depthwise conv; this definition is
   truncated here and continues past this chunk. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf */
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=(uint8_t*)_sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf[] = { "_sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf[] = {1, 64, 133};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551753073523f, .offset= -29697}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Registers Reshape "_..._Conv_reshape_to_2d": inserts a unit height axis,
// {1,64,133} -> {1,64,1,133}, so the 1-D depthwise conv can be lowered to a 2-D conv.
// Reshape takes no params (nullptr / 0 below).
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d */
const char* inputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d[] = { "_sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf" };
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d[] = {1, 64, 1, 133};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551753073523f, .offset= -29697}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Registers Transpose "_..._reshape_to_2d_nhwc": perm {0,2,3,1} converts NCHW-like
// {1,64,1,133} to NHWC {1,1,133,64} (4 x uint32 perm => dataSize 16).
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d" };
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 133, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551753073523f, .offset= -29697}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Registers the static depthwise-conv weight tensor {1, 5, 1, 64} (1x5 kernel, 64 channels),
// u8 asymmetric quantized (scale 0.0046361..., offset -134); bytes come from the companion
// binary blob via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_3_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_3_dwconv_weight[] = {1, 5, 1, 64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_3_dwconv_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_3_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0046361172571778f, .offset= -134}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_3_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_3_dwconv_weight),
.dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_3_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
// Registers the static depthwise-conv bias tensor {64}, u8 quantized
// (scale 0.0013074..., offset -102), data from the binary blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_3_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_3_dwconv_bias[] = {64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_3_dwconv_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_3_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0013074106536806f, .offset= -102}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_3_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_3_dwconv_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_3_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
// Registers DepthWiseConv2d "_..._Conv_2d": dilation {1,1}, pad_amount {0,0,0,0} (valid),
// stride {1,1}; inputs are the NHWC activation {1,1,133,64} plus the weight/bias tensors
// above; output "_..._Conv_intermediate" {1,1,129,64} (133 - 5 + 1 = 129), u16 with its own
// encoding (scale 3.98972e-5, offset -17677). Each 2-element uint32 param => dataSize 8,
// the 2x2 pad_amount => dataSize 16.
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_dwconv_Conv_2d */
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_dilation[] = {2};
uint32_t _sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_pad_amount[] = {2, 2};
uint32_t _sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_stride[] = {2};
uint32_t _sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d[] = { "_sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_3_dwconv_weight", "tts_dp_sentence_encoder_convnext_convnext_3_dwconv_bias" };
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate[] = {1, 1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000398972042603f, .offset= -17677}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_dwconv_Conv_2d", // Node Name
"qti.aisw", // Package Name
"DepthWiseConv2d", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d, // Node Params
3, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Registers Transpose "_..._Conv_intermediate_nchw": perm {0,3,1,2} converts NHWC
// {1,1,129,64} back to NCHW {1,64,1,129} (4 x uint32 perm => dataSize 16).
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw */
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {4};
uint32_t _sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= {
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw[] = { "_sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate" };
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000398972042603f, .offset= -17677}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Registers Reshape "_..._Conv_intermediate": drops the unit height axis,
// {1,64,1,129} -> {1,64,129}, undoing the 2-D lowering of the 1-D depthwise conv.
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate */
const char* inputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate[] = { "_sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw" };
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0[] = {1, 64, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000398972042603f, .offset= -17677}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Registers Transpose "_..._Conv_output_0_nfc": perm {0,2,1} restores feature-last layout,
// {1,64,129} -> {1,129,64} (3 x uint32 perm => dataSize 12).
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc */
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {3};
uint32_t _sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc[] = { "_sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000398972042603f, .offset= -17677}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions=
nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Registers ElementWiseBinary (converted ONNX Mul) "_..._convnext_3_Mul_1":
// (conv output nfc, _sentence_encoder_Concat_2_output_0) -> u16 {1, 129, 64}.
// Its output is named "_..._norm_Transpose_output_0" (the converter folded the
// surrounding norm Transpose into this tensor name).
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_Mul_1 */
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__sentence_encoder_convnext_convnext_3_Mul_1[] = { "_sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc", "_sentence_encoder_Concat_2_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_3_norm_Transpose_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000398972042603f, .offset= -17677}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_Mul_1, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_Mul_1, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Registers the static LayerNorm gamma (scale) tensor {64}, u8 quantized
// (scale 0.0037777..., offset 0), data from the binary blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_weight[] = {64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0037777547258884f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
// Registers the static LayerNorm beta (bias) tensor {64}, u8 quantized
// (scale 0.0017404..., offset -106), data from the binary blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_bias[] = {64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017404851969332f, .offset= -106}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
// Registers LayerNorm "_..._norm_norm_LayerNormalization": axes={2} normalizes over the
// 64-feature axis of {1,129,64}; epsilon=1e-6; inputs are the Mul_1 output plus the
// gamma/beta tensors above; u16 output with a fresh encoding (scale 7.43411e-5,
// offset -39085). Single uint32 axes tensor => dataSize 4.
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization */
uint32_t dimensions__sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
const char* inputs__sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization[] = { "_sentence_encoder_convnext_convnext_3_norm_Transpose_output_0", "tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_weight", "tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_bias" };
uint32_t dimensions__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000743411073927f, .offset= -39085}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err; }
// Registers Transpose "_..._norm_Transpose_1_output_0_ncf": perm {0,2,1},
// {1,129,64} -> {1,64,129}, feeding the pointwise conv that follows.
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf[] = { "_sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf[] = {1, 64, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000743411073927f, .offset= -39085}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err; }
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d */ const char*
inputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d[] = {
    "_sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d[] = {1, 64, 1, 129};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0000743411073927f, .offset= -39085}}},
        .rank= 4,
        .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose {1,64,1,129} -> {1,1,129,64} (perm {0,2,3,1}): NCHW -> NHWC,
// the layout the Conv2d node below consumes.
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc_perm,
                        .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0000743411073927f, .offset= -39085}}},
        .rank= 4,
        .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Static pwconv1 weight {1,1,64,256}: 64 -> 256 pointwise expansion,
// UFIXED_POINT_8, data from the model weight blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_weight[] = {1, 1, 64, 256};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_weight",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0041969628073275f, .offset= -133}}},
                               .rank= 4,
                               .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_weight,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_weight),
                                              .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_weight)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                .hybridCoo= {.numSpecifiedElements= 0,
                                                             .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

// Static pwconv1 bias {256}, UFIXED_POINT_8, data from the weight blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_bias[] = {256};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_bias",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0021934390533715f, .offset= -227}}},
                               .rank= 1,
                               .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_bias,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_bias),
                                              .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_bias)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

// pwconv1 lowered to a 1x1 Conv2d: unit stride/dilation, zero padding,
// group 1 (dense, not depthwise).
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d */
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_dilation[] = {2};
  uint32_t _sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_stride[] = {2};
  uint32_t
_sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d[] = {
    // dilation {1,1}
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_dilation",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_dilation,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_dilation,
                        .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    // pad_amount 2x2 zeros (no padding)
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_pad_amount,
                        .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    // stride {1,1}
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_stride",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_stride,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d_stride,
                        .dataSize=8}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="reuse_sparse_indices",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d[] = {
    "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc",
    "tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_weight",
    "tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_bias"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate[] = {1, 1, 129, 256};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0000859289139044f, .offset= -50951}}},
        .rank= 4,
        .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose {1,1,129,256} -> {1,256,1,129} (perm {0,3,1,2}): NHWC back
// to NCHW after the convolution.
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw */
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= {
           .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw_perm,
           .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw[] = {
    "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw[] = {1, 256, 1, 129};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0000859289139044f, .offset= -50951}}},
        .rank= 4,
        .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}
// Reshape {1,256,1,129} -> {1,256,129}: drop the dummy height axis that
// was inserted for the 2d convolution.
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate */
  const char* inputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate[] = {
    "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_output_0[] = {1, 256, 129};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0000859289139044f, .offset= -50951}}},
        .rank= 3,
        .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv1_Conv_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// ElementWiseNeuron with operation=1 on the pwconv1 output.
// NOTE(review): operation=1 selects the neuron type; the output name
// (act_Mul_1) suggests this is the converter's fused form of the
// ConvNeXt activation — confirm the enum value against QnnOpDef.h.
static ModelError_t addNode__elementwiseneuron_8(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _elementwiseneuron_8 */
  Qnn_Param_t params__elementwiseneuron_8[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
  };
  const char* inputs__elementwiseneuron_8[] = {
    "_sentence_encoder_convnext_convnext_3_pwconv1_Conv_output_0"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_act_Mul_1_output_0[] = {1, 256, 129};
  Qnn_Tensor_t outputs__elementwiseneuron_8[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_convnext_convnext_3_act_Mul_1_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0000197068602574f, .offset= -8625}}},
        .rank= 3,
        .dimensions=dimensions__sentence_encoder_convnext_convnext_3_act_Mul_1_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_elementwiseneuron_8", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseNeuron", // Qnn Node Type
                         params__elementwiseneuron_8, // Node Params
                         1, // Num Node Params
                         inputs__elementwiseneuron_8, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__elementwiseneuron_8, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Reshape {1,256,129} -> {1,256,1,129} so pwconv2 can also run as a 2d
// convolution.
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d */
  const char* inputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d[] = {
    "_sentence_encoder_convnext_convnext_3_act_Mul_1_output_0"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d[] = {1, 256, 1, 129};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0000197068602574f, .offset= -8625}}},
        .rank= 4,
        .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Transpose {1,256,1,129} -> {1,1,129,256} (perm {0,2,3,1}): NCHW -> NHWC
// for the pwconv2 Conv2d.
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                            QNN_QUANTIZATION_ENCODING_UNDEFINED,
                            {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc_perm,
                        .dataSize=16}},
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 256};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
                           QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                           {.scaleOffsetEncoding= {.scale= 0.0000197068602574f, .offset= -8625}}},
        .rank= 4,
        .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                         .hybridCoo= {.numSpecifiedElements= 0,
                                      .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

// Static pwconv2 weight {1,1,256,64}: 256 -> 64 pointwise projection,
// UFIXED_POINT_8, data from the model weight blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_weight[] = {1, 1, 256, 64};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_weight", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_weight",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0029293266125023f, .offset= -129}}},
                               .rank= 4,
                               .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_weight,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_weight),
                                              .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_weight)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

// Static pwconv2 bias {64}, UFIXED_POINT_8, data from the weight blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_bias[] = {64};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_bias", // Tensor Name
                           (Qnn_Tensor_t) {
                             .version= QNN_TENSOR_VERSION_2,
                             {.v2= {
                               .id=0,
                               .name= "tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_bias",
                               .type= QNN_TENSOR_TYPE_STATIC,
                               .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                               .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                               .quantizeParams= { QNN_DEFINITION_DEFINED,
                                                  QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                  {.scaleOffsetEncoding= {.scale= 0.0011005521519110f, .offset= -103}}},
                               .rank= 1,
                               .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_bias,
                               .memType= QNN_TENSORMEMTYPE_RAW,
                               {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_bias),
                                              .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_bias)}},
                               .isDynamicDimensions= nullptr,
                               .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                                                .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                               .isProduced= 0}}}
  ), err);
  return err;
}

// pwconv2 lowered to a 1x1 Conv2d: unit stride/dilation, zero padding,
// group 1. (Definition continues past this chunk of the generated file.)
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d */
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_dilation[] = {2};
  uint32_t _sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_stride[] = {2};
  uint32_t _sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d[] = { "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_weight", "tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000482191207993f, .offset= -28914}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 
0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
/* Continuation of addNode_..._3_pwconv2_Conv_intermediate_nchw: declares its input
 * name, output tensor {1,64,1,129} and registers the Transpose node. */
inputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw[] = {
"_sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate"
};
uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000482191207993f, .offset= -28914}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Reshape node: drops the dummy height axis, {1,64,1,129} -> {1,64,129}
 * (parameter-less Reshape; output shape is given by the output tensor dims). */
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate */
const char*
inputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate[] = {
"_sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw"
};
uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0[] = {1, 64, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000482191207993f, .offset= -28914}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose node: {1,64,129} (NCF) -> {1,129,64} (NFC) via perm {0,2,1},
 * putting features last for the elementwise ops that follow. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc */
uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {3};
uint32_t
_sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc[] = {
"_sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0"
};
uint32_t dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000482191207993f, .offset= -28914}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
/* Continuation of addNode_..._3_pwconv2_Conv_output_0_nfc: finishes the output
 * tensor literal and registers the Transpose node. */
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Registers the static per-channel scale ("gamma") tensor of convnext block 3,
 * shape {1,1,64}, 16-bit quantized, payload from the model binary blob. */
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_3_gamma(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_3_gamma[] = {1, 1, 64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_3_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_3_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000015097792812f, .offset= -1724}}}, .rank= 3, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_3_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_3_gamma), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_3_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

/* ElementWiseBinary (operation=13, multiply) node: gamma * pwconv2 output,
 * broadcasting {1,1,64} against {1,129,64}. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_Mul_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_Mul_2 */
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_Mul_2[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__sentence_encoder_convnext_convnext_3_Mul_2[] = {
"tts_dp_sentence_encoder_convnext_convnext_3_gamma",
"_sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc"
};
uint32_t dimensions__sentence_encoder_convnext_convnext_3_Mul_2_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_Mul_2[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000036706876472f, .offset= -31779}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_Mul_2, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_Mul_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* ElementWiseBinary (operation=0, add) node: residual add of the block-3 branch
 * (Mul_output_0, produced earlier in the file) with Mul_2_output_0. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_Add(QnnModel& model){
ModelError_t err =
MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_3_Add */
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_Add[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__sentence_encoder_convnext_convnext_3_Add[] = {
"_sentence_encoder_convnext_convnext_3_Mul_output_0",
"_sentence_encoder_convnext_convnext_3_Mul_2_output_0"
};
uint32_t dimensions__sentence_encoder_convnext_convnext_3_Add_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_Add[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551761040697f, .offset= -29697}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_Add, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_Add, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Multiply node masking the block-3 output with the sequence mask (Concat_2_output_0). */
static ModelError_t addNode__sentence_encoder_convnext_convnext_3_Mul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR
_sentence_encoder_convnext_convnext_3_Mul_3 */
Qnn_Param_t params__sentence_encoder_convnext_convnext_3_Mul_3[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__sentence_encoder_convnext_convnext_3_Mul_3[] = {
"_sentence_encoder_convnext_convnext_3_Add_output_0",
"_sentence_encoder_Concat_2_output_0"
};
uint32_t dimensions__sentence_encoder_convnext_convnext_3_Mul_3_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_3_Mul_3[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_3_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551761040697f, .offset= -29697}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_3_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_3_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_convnext_convnext_3_Mul_3, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_3_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_3_Mul_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Multiply node (operation=13) applying the same mask again at the entry of
 * convnext block 4. Shapes and quant params match the block-3 output exactly. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_Mul */
Qnn_Param_t params__sentence_encoder_convnext_convnext_4_Mul[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
};
const char* inputs__sentence_encoder_convnext_convnext_4_Mul[] = {
"_sentence_encoder_convnext_convnext_3_Mul_3_output_0",
"_sentence_encoder_Concat_2_output_0"
};
uint32_t dimensions__sentence_encoder_convnext_convnext_4_Mul_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_Mul[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551761040697f, .offset= -29697}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_convnext_convnext_4_Mul, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Pad node for the block-4 depthwise conv: pads the sequence axis by 2 on each
 * side ({1,129,64} -> {1,133,64}); pad_amount is a {3,2} table of uint32 pairs
 * (6 values, dataSize 24) and scheme=3 selects the pad mode used by the converter. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_dwconv_Pad(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_dwconv_Pad */
uint32_t
dimensions__sentence_encoder_convnext_convnext_4_dwconv_Pad_pad_amount[] = {3, 2};
uint32_t _sentence_encoder_convnext_convnext_4_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0};
Qnn_Param_t params__sentence_encoder_convnext_convnext_4_dwconv_Pad[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Pad_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Pad_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_dwconv_Pad_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
};
const char* inputs__sentence_encoder_convnext_convnext_4_dwconv_Pad[] = {
"_sentence_encoder_convnext_convnext_4_Mul_output_0"
};
uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0[] = {1, 133, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_dwconv_Pad[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551761040697f, .offset= -29697}}}, .rank= 3,
/* Continuation of addNode_..._4_dwconv_Pad: finishes the output tensor literal
 * and registers the Pad node (2 params: pad_amount tensor + scheme scalar). */
.dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_dwconv_Pad", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__sentence_encoder_convnext_convnext_4_dwconv_Pad, // Node Params
2, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_dwconv_Pad, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_dwconv_Pad, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose node: padded activation {1,133,64} (NFC) -> {1,64,133} (NCF) via
 * perm {0,2,1}, ahead of the 2-D lowering of the depthwise conv. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf */
uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf_perm[] = {3};
uint32_t _sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=(uint8_t*)_sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf[] = {
"_sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0"
};
uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf[] = {1, 64, 133};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551761040697f, .offset= -29697}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Reshape node: inserts a dummy height axis, {1,64,133} -> {1,64,1,133}, so the
 * 1-D depthwise conv can run as a 2-D Conv2d. */
static ModelError_t
addNode__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d */
const char* inputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d[] = {
"_sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf"
};
uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d[] = {1, 64, 1, 133};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551761040697f, .offset= -29697}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

/* Transpose node: NCHW {1,64,1,133} -> NHWC {1,1,133,64} via perm {0,2,3,1},
 * the layout the backend Conv2d expects. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR
_sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
};
const char* inputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc[] = {
"_sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d"
};
uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 133, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding=
{.scale= 0.0000551761040697f, .offset= -29697}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_4_dwconv_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_4_dwconv_weight[] = {1, 5, 1, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_4_dwconv_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_4_dwconv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0049884109757841f, .offset= -130}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_4_dwconv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_4_dwconv_weight), 
.dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_4_dwconv_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_4_dwconv_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_4_dwconv_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_4_dwconv_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_4_dwconv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010519382776693f, .offset= -139}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_4_dwconv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_4_dwconv_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_4_dwconv_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_dwconv_Conv_2d */ uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_pad_amount[] = {2, 2}; uint32_t 
// DepthWiseConv2d node "_sentence_encoder_convnext_convnext_4_dwconv_Conv_2d":
// tensor params dilation={1,1}, pad_amount={{0,0},{0,0}}, stride={1,1}
// (each a static QNN_DATATYPE_UINT_32 tensor; dataSize = element count * 4
// bytes).  Inputs are the NHWC-reshaped activation plus the static
// weight/bias tensors defined above; output is a 16-bit quantized NHWC
// intermediate of dims {1, 1, 129, 64}.
_sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_stride[] = {2}; uint32_t _sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
// "stride" tensor param {1,1}, then the node's input-name list
// (activation, weight, bias) and the UFIXED_POINT_16 output tensor
// "_sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate"
// (scale 0.0000460…/offset -28080).
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_dwconv_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d[] = { "_sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_4_dwconv_weight", "tts_dp_sentence_encoder_convnext_convnext_4_dwconv_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000460280098196f, .offset= -28080}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr,
// addNode() registers the DepthWiseConv2d with 3 params / 3 inputs /
// 1 output; then addNode__…Conv_intermediate_nchw begins: a Transpose
// with perm {0,3,1,2} (NHWC back to NCHW).
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_4_dwconv_Conv_2d", // Node Name "qti.aisw", // Package Name "DepthWiseConv2d", // Qnn Node Type params__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d, // Node Params 3, // Num Node Params inputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= {
// Transpose node "…Conv_intermediate_nchw": {1,1,129,64} NHWC ->
// {1,64,1,129} via perm {0,3,1,2}; output keeps the conv's quantization
// (scale 0.0000460…/offset -28080) since Transpose only reorders data.
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw[] = { "_sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000460280098196f, .offset= -28080}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR
// Reshape node "…Conv_intermediate": drops the singleton height axis,
// {1,64,1,129} -> {1,64,129}; parameterless (nullptr params, count 0).
// The line then opens addNode__…Conv_output_0_nfc, a rank-3 Transpose
// whose perm tensor has dims {3}.
_sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate */ const char* inputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate[] = { "_sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0[] = {1, 64, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000460280098196f, .offset= -28080}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc */ uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t
// Transpose node "…Conv_output_0_nfc": perm {0,2,1} swaps the channel and
// frame axes, {1,64,129} -> {1,129,64} (channels-last for the following
// element-wise op and LayerNorm).  perm is a static UINT_32 tensor,
// dataSize 12 = 3 elements * 4 bytes.
_sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc[] = { "_sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000460280098196f, .offset= -28080}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions=
// addNode() for the nfc Transpose, then addNode__…Mul_1: an
// ElementWiseBinary node with scalar param "operation"=13 (node name
// suggests element-wise multiply — semantics defined by QnnOpDef) taking
// the transposed conv output and "_sentence_encoder_Concat_2_output_0"
// (defined elsewhere in the file) as its two inputs.
nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_4_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_Mul_1 */ Qnn_Param_t params__sentence_encoder_convnext_convnext_4_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__sentence_encoder_convnext_convnext_4_Mul_1[] = { "_sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc", "_sentence_encoder_Concat_2_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_4_norm_Transpose_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000460280098196f, .offset= -28080}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW,
// addNode() registers Mul_1 (1 param / 2 inputs / 1 output), followed by
// the static 8-bit LayerNorm gamma tensor
// "tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_weight",
// dims {64}, scale 0.0035300…/offset 0, payload from the binary blob.
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_4_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_4_Mul_1, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_4_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_4_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_weight[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0035300324670970f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t
// Static 8-bit LayerNorm beta tensor "…norm_norm_bias", dims {64}
// (scale 0.0020046…/offset -159), payload from the binary blob; then
// addNode__…LayerNormalization begins with its tensor param
// "axes" = {2} (normalize over the trailing 64-channel axis of the
// {1,129,64} input).
addTensor_tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0020046103745699f, .offset= -159}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization */ uint32_t dimensions__sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization_axes[] = {1}; uint32_t _sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
// Scalar param "epsilon" = 1e-6; the node consumes the Mul_1 output plus
// the gamma/beta tensors above and produces a fresh UFIXED_POINT_16
// tensor with its own encoding (scale 0.0000758…/offset -25952) —
// LayerNorm changes the value distribution, hence the new scale/offset.
.dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} }; const char* inputs__sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization[] = { "_sentence_encoder_convnext_convnext_4_norm_Transpose_output_0", "tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_weight", "tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000758997266530f, .offset= -25952}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
// addNode() registers the LayerNorm (2 params / 3 inputs / 1 output);
// then addNode__…Transpose_1_output_0_ncf begins — another perm {0,2,1}
// Transpose taking the normalized tensor back to channel-major layout.
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization", // Node Name "qti.aisw", // Package Name "LayerNorm", // Qnn Node Type params__sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization, // Node Params 2, // Num Node Params inputs__sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf */ uint32_t dimensions__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf_perm[] = {3}; uint32_t _sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
// Transpose node "…Transpose_1_output_0_ncf": {1,129,64} -> {1,64,129}
// via perm {0,2,1}; output inherits the LayerNorm encoding
// (scale 0.0000758…/offset -25952).
.isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf[] = { "_sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf[] = {1, 64, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000758997266530f, .offset= -25952}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d */ const char*
// Reshape node "…pwconv1_Conv_reshape_to_2d": re-inserts a singleton
// height axis, {1,64,129} -> {1,64,1,129}, so the 1-D conv can run as a
// 2-D conv; parameterless.  The line then opens the companion
// NCHW->NHWC Transpose (perm tensor dims {4}) defined next.
inputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d[] = { "_sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf" }; uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000758997266530f, .offset= -25952}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t
// Transpose node "…pwconv1_Conv_reshape_to_2d_nhwc": perm {0,2,3,1}
// converts {1,64,1,129} NCHW to {1,1,129,64} NHWC ahead of the pwconv1
// Conv2d; output keeps the LayerNorm encoding since Transpose only
// reorders data.
_sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d" }; uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000758997266530f, .offset= -25952}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc, .memType=
// addNode() for the NHWC Transpose, then the static 8-bit 1x1-kernel
// conv weight tensor "tts_dp_…pwconv1_weight", dims {1, 1, 64, 256}
// (scale 0.0046466…/offset -129), payload from the binary blob.
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_weight[] = {1, 1, 64, 256}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0046466593630612f, .offset= -129}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
// Matching static 8-bit bias tensor "tts_dp_…pwconv1_bias", dims {256}
// (scale 0.0023837…/offset -229); the line then opens
// addNode__…pwconv1_Conv_2d and its dilation/pad_amount/stride param
// arrays (all {1,1} / zeros, same pattern as the dwconv node above).
.numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_bias[] = {256}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023837212938815f, .offset= -229}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d */ uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_pad_amount[] = {2, 2}; uint32_t _sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_stride[] = {2}; uint32_t
// Conv node "…pwconv1_Conv_2d" (continues past this chunk): tensor params
// dilation={1,1}, pad_amount={{0,0},{0,0}}, stride={1,1}, plus scalar
// params "group"=1 and "reuse_sparse_indices"=false.  Inputs are the NHWC
// activation and the {1,1,64,256} weight / {256} bias tensors above; the
// output tensor "…pwconv1_Conv_intermediate" is UFIXED_POINT_16,
// dims {1, 1, 129, 256} (scale 0.0000904…/offset -49324).  The addNode()
// call itself lies beyond this chunk.
_sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d[] = { "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_weight", "tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate[] = {1, 1, 129, 256}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000904673870536f, .offset= -49324}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d, // Node Params 5, // Num Node Params inputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw[] = { "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw[] = {1, 256, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000904673870536f, .offset= -49324}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } 
// Reshape node: squeezes the singleton H dim, {1,256,1,129} -> {1,256,129}.
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate */
const char* inputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate[] = {
"_sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw" };
uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_output_0[] = {1, 256, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000904673870536f, .offset= -49324}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseNeuron node (operation=1 — exact neuron type is defined by the QNN op
// package; presumably the converter's lowering of the ONNX activation here — TODO
// confirm against QnnOpDef). Input/output shapes are both {1,256,129}.
static ModelError_t addNode__elementwiseneuron_10(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_10 */
Qnn_Param_t params__elementwiseneuron_10[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} };
const char* inputs__elementwiseneuron_10[] = {
"_sentence_encoder_convnext_convnext_4_pwconv1_Conv_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_4_act_Mul_1_output_0[] = {1, 256, 129};
Qnn_Tensor_t outputs__elementwiseneuron_10[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000233782739087f, .offset= -7270}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_elementwiseneuron_10", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__elementwiseneuron_10, // Node Params
1, // Num Node Params
inputs__elementwiseneuron_10, // Input Tensor Names
1, // Num Input Tensor Names
outputs__elementwiseneuron_10, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape node: unsqueezes {1,256,129} -> {1,256,1,129} so pwconv2 can run as Conv2d.
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d */
const char* inputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d[] = {
"_sentence_encoder_convnext_convnext_4_act_Mul_1_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d[] = {1, 256, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000233782739087f, .offset= -7270}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose node: perm {0,2,3,1} maps NCHW {1,256,1,129} to NHWC {1,1,129,256}
// ahead of the pwconv2 Conv2d node.
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc[] = {
"_sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d" };
uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 256};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000233782739087f, .offset= -7270}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static weight tensor for pwconv2: uint8 quantized
// (scale=0.0034159286879003, offset=-111), shape {1,1,256,64} (HWIO: 256 in, 64 out).
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_weight[] = {1, 1, 256, 64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0034159286879003f, .offset= -111}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

// Registers the static bias tensor for pwconv2: uint8 quantized
// (scale=0.0011897583026439, offset=-117), rank-1 {64}.
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_bias[] = {64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0011897583026439f, .offset= -117}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

// Adds the pwconv2 Conv2d node (pointwise 256 -> 64 channels): stride {1,1},
// dilation {1,1}, pad {0,0,0,0}, group=1. Output is NHWC {1,1,129,64}, uint16
// quantized (scale=0.0000532267222297, offset=-27462).
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d */
uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_dilation[] = {2};
uint32_t _sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_stride[] = {2};
uint32_t _sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d[] = {
"_sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc",
"tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_weight",
"tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_bias" };
uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate[] = {1, 1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000532267222297f, .offset= -27462}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d, // Node Params
5, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose node after pwconv2: perm {0,3,1,2}, NHWC {1,1,129,64} -> NCHW {1,64,1,129}.
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw */
uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw[] = {
"_sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate" };
uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000532267222297f, .offset= -27462}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Reshape node: squeezes {1,64,1,129} -> {1,64,129} after pwconv2.
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate */
const char* inputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate[] = {
"_sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw" };
uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0[] = {1, 64, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000532267222297f, .offset= -27462}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Transpose node: perm {0,2,1}, {1,64,129} -> {1,129,64} (channels-last layout
// for the following element-wise ops).
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc */
uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc_perm[] = {3};
uint32_t _sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc[] = {
"_sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000532267222297f, .offset= -27462}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static per-block scale tensor "gamma" for convnext_4:
// uint16 quantized (scale=0.0000015449254533, offset=-3573), shape {1,1,64}
// (broadcasts over the {1,129,64} activation in the Mul_2 node below).
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_4_gamma(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_4_gamma[] = {1, 1, 64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_4_gamma", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_4_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000015449254533f, .offset= -3573}}}, .rank= 3, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_4_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_4_gamma), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_4_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}

// ElementWiseBinary node (operation=13; node name says Mul): gamma {1,1,64}
// broadcast-multiplied with the pwconv2 output {1,129,64}.
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_Mul_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_Mul_2 */
Qnn_Param_t params__sentence_encoder_convnext_convnext_4_Mul_2[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__sentence_encoder_convnext_convnext_4_Mul_2[] = {
"tts_dp_sentence_encoder_convnext_convnext_4_gamma",
"_sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc" };
uint32_t dimensions__sentence_encoder_convnext_convnext_4_Mul_2_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_Mul_2[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000045368997235f, .offset= -30841}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_convnext_convnext_4_Mul_2, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_Mul_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary node (operation=0; node name says Add): residual add of
// _sentence_encoder_convnext_convnext_4_Mul_output_0 (produced elsewhere in this
// file) with the gamma-scaled branch. Shapes are {1,129,64}.
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_Add(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_Add */
Qnn_Param_t params__sentence_encoder_convnext_convnext_4_Add[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__sentence_encoder_convnext_convnext_4_Add[] = {
"_sentence_encoder_convnext_convnext_4_Mul_output_0",
"_sentence_encoder_convnext_convnext_4_Mul_2_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_4_Add_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_Add[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551762313989f, .offset= -29697}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_Add", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_convnext_convnext_4_Add, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_Add, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_Add, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary node (operation=13; node name says Mul): multiplies the block
// output by _sentence_encoder_Concat_2_output_0 (produced elsewhere; presumably
// the broadcast text mask — TODO confirm from the full graph). Output keeps the
// Add output's quantization params.
static ModelError_t addNode__sentence_encoder_convnext_convnext_4_Mul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_4_Mul_3 */
Qnn_Param_t params__sentence_encoder_convnext_convnext_4_Mul_3[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__sentence_encoder_convnext_convnext_4_Mul_3[] = {
"_sentence_encoder_convnext_convnext_4_Add_output_0",
"_sentence_encoder_Concat_2_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_4_Mul_3_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_4_Mul_3[] = {
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_4_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551762313989f, .offset= -29697}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_4_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_convnext_convnext_4_Mul_3", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_convnext_convnext_4_Mul_3, // Node Params
1, // Num Node Params
inputs__sentence_encoder_convnext_convnext_4_Mul_3, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_convnext_convnext_4_Mul_3, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

static ModelError_t addNode__sentence_encoder_convnext_convnext_5_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_Mul */
Qnn_Param_t params__sentence_encoder_convnext_convnext_5_Mul[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_5_Mul[] = {
    "_sentence_encoder_convnext_convnext_4_Mul_3_output_0",
    "_sentence_encoder_Concat_2_output_0"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_Mul_output_0[] = {1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_Mul[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_sentence_encoder_convnext_convnext_5_Mul_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_16,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0000551762313989f, .offset= -29697}}},
      .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_Mul_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_5_Mul", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__sentence_encoder_convnext_convnext_5_Mul, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_5_Mul, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_5_Mul, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Pad node feeding block-5's 5-tap depthwise conv: pad_amount {0,0, 2,2, 0,0} (rank-2
   tensor, 3 dims x {before,after}) pads only axis 1, growing the sequence 129 -> 133.
   scheme=3 -- check QnnOpDef.h for which Pad scheme the code 3 denotes. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_dwconv_Pad(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_dwconv_Pad */
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Pad_pad_amount[] = {3, 2};
  uint32_t _sentence_encoder_convnext_convnext_5_dwconv_Pad_pad_amount[] = {0, 0, 2, 2, 0, 0};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_5_dwconv_Pad[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
        .name= "_sentence_encoder_convnext_convnext_5_dwconv_Pad_pad_amount",
        .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Pad_pad_amount,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_dwconv_Pad_pad_amount, .dataSize=24}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_5_dwconv_Pad[] = {
    "_sentence_encoder_convnext_convnext_5_Mul_output_0"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0[] = {1, 133, 64};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_dwconv_Pad[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_16,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0000551762313989f, .offset= -29697}}},
      .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_5_dwconv_Pad", // Node Name
                         "qti.aisw", // Package Name
                         "Pad", // Qnn Node Type
                         params__sentence_encoder_convnext_convnext_5_dwconv_Pad, // Node Params
                         2, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_5_dwconv_Pad, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_5_dwconv_Pad, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose node ("ncf" = to channel-first): perm {0,2,1} turns the padded
   {1,133,64} tensor into {1,64,133} ahead of the depthwise conv. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf */
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf_perm[] = {3};
  uint32_t _sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
        .name= "_sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf_perm",
        .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf_perm,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf_perm, .dataSize=12}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf[] = {
    "_sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf[] = {1, 64, 133};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_16,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0000551762313989f, .offset= -29697}}},
      .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Reshape node: lifts the rank-3 {1,64,133} tensor to rank-4 {1,64,1,133} so the
   1-D depthwise conv can execute as DepthWiseConv2d (see Conv_2d node below). */
static ModelError_t
addNode__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d */
  const char* inputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d[] = {
    "_sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d[] = {1, 64, 1, 133};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_16,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0000551762313989f, .offset= -29697}}},
      .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose node: perm {0,2,3,1} converts {1,64,1,133} to the channel-last layout
   {1,1,133,64} that the DepthWiseConv2d node below consumes. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
        .name= "_sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc_perm",
        .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc_perm,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc[] = {
    "_sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc[] = {1, 1, 133, 64};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_16,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0000551762313989f, .offset= -29697}}},
      .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Static tensor: block-5 depthwise conv weights, shape {1,5,1,64} (5-tap kernel,
   64 channels), 8-bit asymmetric quant (scale=4.963e-03, offset=-131); payload from
   the model binary. */
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_5_dwconv_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_5_dwconv_weight[] = {1, 5, 1, 64};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_5_dwconv_weight", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
                             .name= "tts_dp_sentence_encoder_convnext_convnext_5_dwconv_weight",
                             .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                             .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                             .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                {.scaleOffsetEncoding= {.scale= 0.0049632163718343f, .offset= -131}}},
                             .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_5_dwconv_weight,
                             .memType= QNN_TENSORMEMTYPE_RAW,
                             {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_5_dwconv_weight),
                                            .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_5_dwconv_weight)}},
                             .isDynamicDimensions= nullptr,
                             .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                             .isProduced= 0}}}
  ), err);
  return err;
}

/* Static tensor: block-5 depthwise conv bias, shape {64}, 8-bit asymmetric quant
   (scale=1.068e-03, offset=-156); payload from the model binary. */
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_5_dwconv_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_5_dwconv_bias[] = {64};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_5_dwconv_bias", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
                             .name= "tts_dp_sentence_encoder_convnext_convnext_5_dwconv_bias",
                             .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
                             .dataType= QNN_DATATYPE_UFIXED_POINT_8,
                             .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                                                {.scaleOffsetEncoding= {.scale= 0.0010680896230042f, .offset= -156}}},
                             .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_5_dwconv_bias,
                             .memType= QNN_TENSORMEMTYPE_RAW,
                             {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_5_dwconv_bias),
                                            .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_5_dwconv_bias)}},
                             .isDynamicDimensions= nullptr,
                             .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
                             .isProduced= 0}}}
  ), err);
  return err;
}

/* DepthWiseConv2d node: stride 1x1, dilation 1x1, zero pad_amount (padding was done
   by the explicit Pad node upstream); {1,1,133,64} in -> {1,1,129,64} out (133-5+1). */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_dwconv_Conv_2d */
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_dilation[] = {2};
  uint32_t _sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_pad_amount[] = {2, 2};
  uint32_t
_sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_stride[] = {2};
  uint32_t _sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_stride[] = {1, 1};
  /* Three tensor params: dilation (rank-1 {2}), pad_amount (rank-2 {2,2}), stride (rank-1 {2}). */
  Qnn_Param_t params__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
        .name= "_sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_dilation",
        .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_dilation,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_dilation, .dataSize=8}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
        .name= "_sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_pad_amount",
        .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_pad_amount,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_pad_amount, .dataSize=16}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
          {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
        .name= "_sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_stride",
        .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_stride,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_dwconv_Conv_2d_stride, .dataSize=8}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}}
  };
  /* Inputs: NHWC activation, static weights, static bias (all registered above). */
  const char* inputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d[] = {
    "_sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc",
    "tts_dp_sentence_encoder_convnext_convnext_5_dwconv_weight",
    "tts_dp_sentence_encoder_convnext_convnext_5_dwconv_bias"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate[] = {1, 1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_16,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0000461362360511f, .offset= -24474}}},
      .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_5_dwconv_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "DepthWiseConv2d", // Qnn Node Type
                         params__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d, // Node Params
                         3, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Transpose node: perm {0,3,1,2} moves the conv result back to channel-first,
   {1,1,129,64} -> {1,64,1,129}. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw */
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
     {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
        .name= "_sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw_perm",
        .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UINT_32,
        .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
        .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw_perm,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw_perm, .dataSize=16}},
        .isDynamicDimensions= nullptr,
        .sparseParams= {
          QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw[] = {
    "_sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate"
  };
  uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw[] = {1, 64, 1, 129};
  Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0,
      .name= "_sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw",
      .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
      .dataType= QNN_DATATYPE_UFIXED_POINT_16,
      .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                         {.scaleOffsetEncoding= {.scale= 0.0000461362360511f, .offset= -24474}}},
      .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw,
      .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
      .isDynamicDimensions= nullptr,
      .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
      .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
  ), err);
  return err;
}

/* Reshape node: drops the dummy spatial axis, {1,64,1,129} -> {1,64,129}. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR
_sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate */ const char* inputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate[] = { "_sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw" }; uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0[] = {1, 64, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000461362360511f, .offset= -24474}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc */ uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc_perm[] = {3}; uint32_t 
_sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc_perm[] = {0, 2, 1}; /* perm {0,2,1}: swap the last two axes of the dwconv output (channel-major -> feature-last "nfc") */
Qnn_Param_t params__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc[] = { "_sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc[] = {1, 129, 64};
/* Transposed activation: 1x129x64, 16-bit asymmetric fixed-point (scale/offset from min-max calibration) */
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000461362360511f, .offset= -24474}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc, // Node Params
                       1, // Num Node Params
                       inputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}
/* ElementWiseBinary node (scalar param operation=13) combining the transposed dwconv output with
 * "_sentence_encoder_Concat_2_output_0". Presumably a multiply, per the ONNX node name "Mul_1";
 * confirm the operation id against QnnOpDef.h. Output reuses the input's quant encoding. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_Mul_1 */
Qnn_Param_t params__sentence_encoder_convnext_convnext_5_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__sentence_encoder_convnext_convnext_5_Mul_1[] = { "_sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc", "_sentence_encoder_Concat_2_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_5_norm_Transpose_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_norm_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000461362360511f, .offset= -24474}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_norm_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_convnext_convnext_5_Mul_1", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseBinary", // Qnn Node Type
                       params__sentence_encoder_convnext_convnext_5_Mul_1, // Node Params
                       1, // Num Node Params
                       inputs__sentence_encoder_convnext_convnext_5_Mul_1, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__sentence_encoder_convnext_convnext_5_Mul_1, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}
/* Static LayerNorm gamma (scale) tensor: 64 elements, 8-bit quantized; payload comes from the
 * converter-emitted binary blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_weight[] = {64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_weight", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0035219348501414f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                         ), err);
return err;
}
static ModelError_t
addTensor_tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* Static LayerNorm beta (shift) tensor: 64 elements, 8-bit quantized, loaded from the binary blob. */
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_bias[] = {64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_bias", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012862522853538f, .offset= -120}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                         ), err);
return err;
}
/* LayerNorm node: normalizes axis 2 (the 64-wide feature dim of the 1x129x64 activation) with
 * epsilon=1e-6, using the static gamma/beta tensors added above as inputs 2 and 3. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization */
uint32_t dimensions__sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization_axes[] = {1};
uint32_t _sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization_axes[] = {2}; /* single reduction axis: 2 */
Qnn_Param_t params__sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
const char* inputs__sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization[] = { "_sentence_encoder_convnext_convnext_5_norm_Transpose_output_0", "tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_weight", "tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_bias" };
uint32_t dimensions__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001119506341638f, .offset= -22381}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization", // Node Name
                       "qti.aisw", // Package Name
                       "LayerNorm", // Qnn Node Type
                       params__sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization, // Node Params
                       2, // Num Node Params
                       inputs__sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization, // Input Tensor Names
                       3, // Num Input Tensor Names
                       outputs__sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}
/* Transpose back to channel-major ("ncf") layout: perm {0,2,1} on the 1x129x64 LayerNorm output. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf */
uint32_t dimensions__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf[] = { "_sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf[] = {1, 64, 129}; /* 1x129x64 -> 1x64x129 after perm {0,2,1} */
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001119506341638f, .offset= -22381}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf, // Node Params
                       1, // Num Node Params
                       inputs__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}
/* Reshape 1x64x129 -> 1x64x1x129: inserts a unit height axis so the 1-D pointwise conv can be
 * executed as a Conv2d (see the _Conv_2d node below). */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d */
const char* inputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d[] = { "_sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf" };
uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001119506341638f, .offset= -22381}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}
/* Transpose NCHW -> NHWC (perm {0,2,3,1}): QNN Conv2d consumes channel-last activations. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d" };
uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001119506341638f, .offset= -22381}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc, .memType=
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc, // Node Params
                       1, // Num Node Params
                       inputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}
/* Static pointwise-conv weights: 1x1 kernel, 64 in-channels -> 256 out-channels, 8-bit asymmetric
 * quantized, loaded from the converter's binary blob via BINVARSTART/BINLEN. */
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_weight[] = {1, 1, 64, 256};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_weight", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040733278729022f, .offset= -123}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}}
                         ), err);
return err;
}
/* Static pointwise-conv bias: 256 elements, 8-bit asymmetric quantized, from the binary blob. */
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_bias[] = {256};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_bias", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0018355000065640f, .offset= -224}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                         ), err);
return err;
}
/* Conv2d node implementing the ConvNeXt block's first pointwise conv: 1x1 kernel, stride 1x1,
 * dilation 1x1, zero padding, group=1, expanding 64 -> 256 channels on the NHWC 1x1x129x64 input. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d */
uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_dilation[] = {2};
uint32_t _sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_pad_amount[] = {0, 0, 0, 0}; /* no padding on either spatial axis */
uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_stride[] = {2};
uint32_t
_sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d[] = { "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_weight", "tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_bias" };
uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate[] = {1, 1, 129, 256};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001061833027052f, .offset= -38023}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d", // Node Name
                       "qti.aisw", // Package Name
                       "Conv2d", // Qnn Node Type
                       params__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d, // Node Params
                       5, // Num Node Params
                       inputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d, // Input Tensor Names
                       3, // Num Input Tensor Names
                       outputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}
/* Transpose the NHWC conv result back to NCHW (perm {0,3,1,2}) before collapsing the unit axis. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw */
uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= {
.data=(uint8_t*)_sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw[] = { "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate" };
uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw[] = {1, 256, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001061833027052f, .offset= -38023}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw, // Node Params
                       1, // Num Node Params
                       inputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}
/* Reshape 1x256x1x129 -> 1x256x129: drops the unit height axis, undoing the to-2d expansion. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate */
const char* inputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate[] = { "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw" };
uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_output_0[] = {1, 256, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001061833027052f, .offset= -38023}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}
/* ElementWiseNeuron (scalar param operation=1) on the pwconv1 output, producing the tensor the
 * ONNX graph called act_Mul_1 -- presumably the ConvNeXt activation collapsed into one QNN op;
 * confirm the operation id against QnnOpDef.h. */
static ModelError_t addNode__elementwiseneuron_12(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _elementwiseneuron_12 */
Qnn_Param_t
params__elementwiseneuron_12[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} };
const char* inputs__elementwiseneuron_12[] = { "_sentence_encoder_convnext_convnext_5_pwconv1_Conv_output_0" };
uint32_t dimensions__sentence_encoder_convnext_convnext_5_act_Mul_1_output_0[] = {1, 256, 129};
Qnn_Tensor_t outputs__elementwiseneuron_12[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_act_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000470926643175f, .offset= -3609}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_act_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_elementwiseneuron_12", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseNeuron", // Qnn Node Type
                       params__elementwiseneuron_12, // Node Params
                       1, // Num Node Params
                       inputs__elementwiseneuron_12, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__elementwiseneuron_12, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}
/* Reshape 1x256x129 -> 1x256x1x129: same to-2d expansion as pwconv1, now for the second
 * pointwise conv (256 -> 64 channel projection). */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d */
const char* inputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d[] = { "_sentence_encoder_convnext_convnext_5_act_Mul_1_output_0" };
uint32_t
dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d[] = {1, 256, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000470926643175f, .offset= -3609}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}
/* Transpose NCHW -> NHWC (perm {0,2,3,1}) ahead of the pwconv2 Conv2d node. */
static ModelError_t addNode__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d" };
uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 256};
Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000470926643175f, .offset= -3609}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc, // Node Params
                       1, // Num Node Params
                       inputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}
/* Static pointwise-conv weights for the projection back down: 1x1 kernel, 256 -> 64 channels,
 * 8-bit asymmetric quantized, loaded from the binary blob. */
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_weight[] = {1, 1, 256, 64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_weight", // Tensor Name
                         (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036293452139944f, .offset= -126}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
                         ), err);
return err;
}
static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_bias(QnnModel& model){ ModelError_t err
// pwconv2 bias [64] (UFIXED_POINT_8 static, data from binary blob); then begins
// the pointwise Conv2d node setup: dilation {1,1}, pad {0,0,0,0}, stride {1,1} —
// i.e. a 1x1 convolution over the NHWC view (ONNX 1-D pointwise conv lowered to 2-D).
= MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0011624993057922f, .offset= -104}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d */ uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_pad_amount[] = {2, 2}; uint32_t _sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_stride[] = {2}; uint32_t _sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { 
// Tensor params "dilation" (8 bytes = 2 x uint32), "pad_amount" (16 bytes,
// rank-2 [2,2]) and the start of "stride" — byte counts match the element
// counts declared above.
.version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
// Finishes "stride", adds scalar params group=1 (dense conv) and
// reuse_sparse_indices=false, wires the three inputs (activation, weight, bias)
// and declares the conv output [1,1,129,64] with its own u16 quant encoding.
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d[] = { "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_weight", "tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_bias" }; uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001288320199819f, .offset= -31633}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 
// Registers the Conv2d node (5 params, 3 inputs, 1 output), then begins the
// inverse NHWC->NCHW Transpose helper (perm {0,3,1,2}) for the conv result.
0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d, // Node Params 5, // Num Node Params inputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
// Transpose output [1,64,1,129] reuses the conv's quant encoding (layout-only
// op); the node is registered, then a Reshape helper begins that will drop the
// dummy H dimension back to rank 3.
inputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw[] = { "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001288320199819f, .offset= -31633}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate */ const char* 
// Reshape [1,64,1,129] -> [1,64,129], producing the ONNX-named Conv output;
// then an NCF->NFC Transpose helper begins so channels become the innermost axis.
inputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate[] = { "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw" }; uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0[] = {1, 64, 129}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001288320199819f, .offset= -31633}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc */ uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc_perm[] = {3}; uint32_t 
// perm {0,2,1} (dataSize 12 = 3 x uint32) flips [1,64,129] -> [1,129,64]
// (features-last) for the broadcasted elementwise ops that follow.
_sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc[] = { "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001288320199819f, .offset= -31633}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, 
// Registers the NFC Transpose, adds the ConvNeXt layer-scale "gamma" tensor
// [1,1,64] (UFIXED_POINT_16 static, data from binary blob), then starts Mul_2.
.isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_convnext_convnext_5_gamma(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_convnext_convnext_5_gamma[] = {1, 1, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_convnext_convnext_5_gamma", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_convnext_convnext_5_gamma", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000029486798212f, .offset= -762}}}, .rank= 3, .dimensions=dimensions_tts_dp_sentence_encoder_convnext_convnext_5_gamma, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_convnext_convnext_5_gamma), .dataSize=BINLEN(tts_dp_sentence_encoder_convnext_convnext_5_gamma)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_5_Mul_2(QnnModel& model){ 
// Mul_2: ElementWiseBinary with scalar operation=13 — presumably MULTIPLY given
// the ONNX "Mul" node name (TODO confirm against QnnOpDef.h enum). Broadcasts
// gamma [1,1,64] against the conv output [1,129,64].
ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_Mul_2 */ Qnn_Param_t params__sentence_encoder_convnext_convnext_5_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__sentence_encoder_convnext_convnext_5_Mul_2[] = { "tts_dp_sentence_encoder_convnext_convnext_5_gamma", "_sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc" }; uint32_t dimensions__sentence_encoder_convnext_convnext_5_Mul_2_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000167146172316f, .offset= -27116}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_5_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_5_Mul_2, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_5_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_5_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_5_Add(QnnModel& model){ ModelError_t err = 
// Add: ElementWiseBinary operation=0 — presumably ADD (the ConvNeXt residual:
// block input Mul_output_0 + scaled branch Mul_2_output_0); TODO confirm the
// enum value against QnnOpDef.h.
MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_convnext_convnext_5_Add */ Qnn_Param_t params__sentence_encoder_convnext_convnext_5_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__sentence_encoder_convnext_convnext_5_Add[] = { "_sentence_encoder_convnext_convnext_5_Mul_output_0", "_sentence_encoder_convnext_convnext_5_Mul_2_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_5_Add_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551828998141f, .offset= -29694}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_5_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_5_Add, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_5_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_5_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_convnext_convnext_5_Mul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
// Mul_3: masks the residual sum with _sentence_encoder_Concat_2_output_0
// (operation=13 again); output keeps the Add's quant encoding — consistent with
// multiplying by a presumed 0/1 mask (TODO confirm mask semantics upstream).
_sentence_encoder_convnext_convnext_5_Mul_3 */ Qnn_Param_t params__sentence_encoder_convnext_convnext_5_Mul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__sentence_encoder_convnext_convnext_5_Mul_3[] = { "_sentence_encoder_convnext_convnext_5_Add_output_0", "_sentence_encoder_Concat_2_output_0" }; uint32_t dimensions__sentence_encoder_convnext_convnext_5_Mul_3_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_convnext_convnext_5_Mul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_convnext_convnext_5_Mul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551828998141f, .offset= -29694}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_convnext_convnext_5_Mul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_convnext_convnext_5_Mul_3", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_convnext_convnext_5_Mul_3, // Node Params 1, // Num Node Params inputs__sentence_encoder_convnext_convnext_5_Mul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_convnext_convnext_5_Mul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_Mul_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_Mul_1 */ Qnn_Param_t 
// attn_encoder Mul_1: applies the same Concat_2 mask once more before the
// attention stack (operation=13); shapes and quant encoding unchanged.
params__sentence_encoder_attn_encoder_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} }; const char* inputs__sentence_encoder_attn_encoder_Mul_1[] = { "_sentence_encoder_convnext_convnext_5_Mul_3_output_0", "_sentence_encoder_Concat_2_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_Mul_1_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551828998141f, .offset= -29694}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_Mul_1", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_attn_encoder_Mul_1, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_Mul_1, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_Mul_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_Mul_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_Mul_1_output_0_ncf */ uint32_t dimensions__sentence_encoder_attn_encoder_Mul_1_output_0_ncf_perm[] = {3}; uint32_t 
// NFC->NCF Transpose (perm {0,2,1}, dataSize 12 = 3 x uint32) back to
// channels-first [1,64,129] for the attention layer's conv_q projection.
_sentence_encoder_attn_encoder_Mul_1_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_Mul_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_Mul_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_Mul_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_Mul_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_Mul_1_output_0_ncf[] = { "_sentence_encoder_attn_encoder_Mul_1_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_Mul_1_output_0_ncf[] = {1, 64, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_Mul_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_Mul_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551828998141f, .offset= -29694}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_Mul_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
// Registers the Transpose, then begins conv_q's rank-4 Reshape helper
// [1,64,1,129] (inserts a dummy H dim so the projection can run as a 1x1 Conv2d).
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_Mul_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_Mul_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_Mul_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_Mul_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d */ const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d[] = { "_sentence_encoder_attn_encoder_Mul_1_output_0_ncf" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551828998141f, .offset= -29694}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
// Registers the Reshape node, then sets up the NCHW->NHWC perm {0,2,3,1}
// (dataSize 16 = 4 x uint32) for conv_q's input layout transpose.
"_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* 
inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551828998141f, .offset= -29694}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t 
// conv_q weight: static {1, 1, 64, 64} tensor (1x1 kernel, 64 in / 64 out channels), asymmetric
// uint8 scale/offset quantization; raw bytes come from the model binary via BINVARSTART/BINLEN.
dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_weight[] = {1, 1, 64, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0038937551435083f, .offset= -114}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
// conv_q bias: static rank-1 {64} tensor, uint8 quantized (bias_bitwidth=8 per converter command line),
// also loaded from the model binary blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026374792214483f, .offset= -132}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_bias, .memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
// conv_q Conv2d node: dilation {1, 1}, pad_amount {0, 0, 0, 0}, stride {1, 1} (tensor params below;
// dataSize 8 = 2 uint32, 16 = 4 uint32). Group and reuse_sparse_indices scalars follow on later lines.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_pad_amount[] = {2, 2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_stride[] = {2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr,
// Continuation of the conv_q Conv2d parameter list: pad_amount and stride tensor params,
// then scalar params group=1 (standard, non-grouped convolution) and reuse_sparse_indices=false.
// Inputs are {activation, weight, bias}; output is the u16 {1, 1, 129, 64} intermediate.
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32,
{.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_weight", "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_bias" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000543473106518f, .offset= -31676}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d, // Node Params
5, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t
// Transpose node: perm {0, 3, 1, 2} moves the conv_q Conv2d output back from NHWC {1, 1, 129, 64}
// to NCHW {1, 64, 1, 129}; output keeps the same u16 quantization encoding as the Conv2d output.
addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000543473106518f, .offset= -31676}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Reshape node: {1, 64, 1, 129} -> {1, 2, 32, 129}; splits the 64 channels into 2 x 32
// (presumably 2 attention heads of 32 channels each — TODO confirm against the ONNX graph).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate */ const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_output_0[] = {1, 2, 32, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
.id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000543473106518f, .offset= -31676}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
// conv_k branch: Reshape of the shared encoder activation "_sentence_encoder_attn_encoder_Mul_1_output_0_ncf"
// to {1, 64, 1, 129} — mirrors the conv_q reshape above.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d */ const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d[] = { "_sentence_encoder_attn_encoder_Mul_1_output_0_ncf" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE,
// Continuation of the conv_k reshape output tensor, node registration, then the NCHW->NHWC
// transpose (perm {0, 2, 3, 1}) and the static conv_k weight tensor — same structure as conv_q.
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551828998141f, .offset= -29694}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Transpose node for the conv_k branch: {1, 64, 1, 129} NCHW -> {1, 1, 129, 64} NHWC.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType=
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551828998141f, .offset= -29694}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err); return err; }
// conv_k weight: static {1, 1, 64, 64} uint8 tensor loaded from the model binary.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_weight[] = {1, 1, 64, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040825703181326f, .offset= -122}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
// conv_k bias: static rank-1 {64} uint8 tensor (dimensions declared here; addTensor continues below).
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_bias[] = {64};
// Register the conv_k bias tensor, then the conv_k 1x1 Conv2d (dilation {1, 1}, pad {0, 0, 0, 0},
// stride {1, 1}, group=1) — structurally identical to the conv_q Conv2d above, with conv_k encodings.
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0013204452116042f, .offset= -168}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; }
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_pad_amount[] = {2, 2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_stride[] = {2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_weight", "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_bias" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000662880629534f, .offset= -32191}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d, // Node Params
5, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
// Transpose node: conv_k Conv2d output back to NCHW via perm {0, 3, 1, 2}.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr,
// Finish the conv_k NHWC->NCHW Transpose ({1, 64, 1, 129} output, u16 encoding matching the Conv2d),
// then the Reshape to {1, 2, 32, 129} — same 64 -> 2 x 32 channel split as the conv_q branch.
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000662880629534f, .offset= -32191}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t
addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate */ const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0[] = {1, 2, 32, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000662880629534f, .offset= -32191}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; } static ModelError_t
addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8 */ Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="dynamic_input_data", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="dynamic_output_data", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8[] = {1, 2, 32, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0169697441160679f, .offset= -126}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8", // Node Name "qti.aisw", // Package Name "Convert", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8, // Node Params 2, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d */ const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d[] = { "_sentence_encoder_attn_encoder_Mul_1_output_0_ncf" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551828998141f, .offset= -29694}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
  return err;
}

/* Transpose node: permutes {1, 64, 1, 129} -> {1, 1, 129, 64} with perm {0, 2, 3, 1}
 * (channel-first to channel-last) so the following Conv2d consumes NHWC data. */
static ModelError_t
addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d" };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000551828998141f, .offset= -29694}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* Static tensor: conv_v weights {1, 1, 64, 64}, UFIXED_POINT_8; payload comes from the
 * companion binary blob via BINVARSTART/BINLEN. */
static ModelError_t
addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_weight[] = {1, 1, 64, 64};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_weight", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0045218830928206f, .offset= -129}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
  return err;
}

/* Static tensor: conv_v bias {64}, UFIXED_POINT_8 (file was converted with bias_bitwidth=8). */
static ModelError_t
addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_bias[] = {64};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_bias", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003906895581167f, .offset= -127}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_bias, .memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
  return err;
}

/* Conv2d node: the conv_v projection as a pointwise convolution -- stride {1, 1},
 * dilation {1, 1}, zero pad_amount, group=1 -- applied to the NHWC activation
 * {1, 1, 129, 64} with the static conv_v weight/bias tensors; 16-bit output
 * {1, 1, 129, 64}. */
static ModelError_t
addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d */
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_dilation[] = {2};
  uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_stride[] = {2};
  uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_weight", "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_bias" };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate[] = {1, 1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001119243461289f, .offset= -32516}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d", // Node Name
                         "qti.aisw", // Package Name
                         "Conv2d", // Qnn Node Type
                         params__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d, // Node Params
                         5, // Num Node Params
                         inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d, // Input Tensor Names
                         3, // Num Input Tensor Names
                         outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

static ModelError_t
/* Transpose node: returns the Conv2d output to channel-first layout
 * {1, 1, 129, 64} -> {1, 64, 1, 129} with perm {0, 3, 1, 2}. */
addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw */
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate" };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw[] = {1, 64, 1, 129};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001119243461289f, .offset= -32516}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* Reshape node: splits the 64 conv_v channels into {1, 2, 32, 129}
 * ("_Reshape_2_output_0" -- presumably 2 attention heads x 32 dims; verify against model). */
static ModelError_t
addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate */
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw" };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_2_output_0[] = {1, 2, 32, 129};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001119243461289f, .offset= -32516}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate", // Node Name
                         "qti.aisw", // Package Name
                         "Reshape", // Qnn Node Type
                         nullptr, // Node Params
                         0, // Num Node Params
                         inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* Transpose node: swaps the last two axes of the Reshape output
 * ({1, 2, 32, 129} -> {1, 2, 129, 32}, perm {0, 1, 3, 2}). */
static ModelError_t
addNode__sentence_encoder_attn_encoder_attn_layers_0_Transpose(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Transpose */
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_perm[] = {4};
  uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Transpose_perm[] = {0, 1, 3, 2};
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Transpose[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Transpose_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_output_0" };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_output_0[] = {1, 2, 129, 32};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000543473106518f, .offset= -31676}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_attn_encoder_attn_layers_0_Transpose", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__sentence_encoder_attn_encoder_attn_layers_0_Transpose, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* Transpose node: same {0, 1, 3, 2} permutation applied to Reshape_2 (the conv_v path),
 * producing {1, 2, 129, 32}. */
static ModelError_t
addNode__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Transpose_1 */
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_perm[] = {4};
  uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_perm[] = {0, 1, 3, 2};
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_2_output_0" };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0[] = {1, 2, 129, 32};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001119243461289f, .offset= -32516}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_1", // Node Name
                         "qti.aisw", // Package Name
                         "Transpose", // Qnn Node Type
                         params__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* Convert node: requantizes the 16-bit Transpose_1 output to UFIXED_POINT_8. */
static ModelError_t
addNode__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8 */
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="dynamic_input_data", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="dynamic_output_data", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8[] = {
"_sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0" };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8[] = {1, 2, 129, 32};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0286526326090097f, .offset= -127}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8", // Node Name
                         "qti.aisw", // Package Name
                         "Convert", // Qnn Node Type
                         params__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8, // Node Params
                         2, // Num Node Params
                         inputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8, // Input Tensor Names
                         1, // Num Input Tensor Names
                         outputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* Static tensor: one-element UFIXED_POINT_16 constant from the binary blob; consumed as the
 * second (divisor) operand of the Div node below. */
static ModelError_t
addTensor__sentence_encoder_attn_encoder_attn_layers_0_Constant_10_output_0(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Constant_10_output_0[] = {1};
  VALIDATE(model.addTensor("_sentence_encoder_attn_encoder_attn_layers_0_Constant_10_output_0", // Tensor Name
                           (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Constant_10_output_0", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000863180612214f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Constant_10_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_sentence_encoder_attn_encoder_attn_layers_0_Constant_10_output_0), .dataSize=BINLEN(_sentence_encoder_attn_encoder_attn_layers_0_Constant_10_output_0)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
  return err;
}

/* ElementWiseBinary node with operation code 2 (this node was converted from an ONNX Div,
 * per its name): scales Transpose_output_0 by the Constant_10 scalar. */
static ModelError_t
addNode__sentence_encoder_attn_encoder_attn_layers_0_Div(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Div */
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Div[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Div[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_output_0", "_sentence_encoder_attn_encoder_attn_layers_0_Constant_10_output_0" };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Div_output_0[] = {1, 2, 129, 32};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Div[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000096073381428f, .offset= -31676}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Div_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                         "_sentence_encoder_attn_encoder_attn_layers_0_Div", // Node Name
                         "qti.aisw", // Package Name
                         "ElementWiseBinary", // Qnn Node Type
                         params__sentence_encoder_attn_encoder_attn_layers_0_Div, // Node Params
                         1, // Num Node Params
                         inputs__sentence_encoder_attn_encoder_attn_layers_0_Div, // Input Tensor Names
                         2, // Num Input Tensor Names
                         outputs__sentence_encoder_attn_encoder_attn_layers_0_Div, // Output Tensors
                         1// Num Output Tensors
                         ), err);
  return err;
}

/* MatMul node: {1, 2, 129, 32} x {1, 2, 32, 129} -> {1, 2, 129, 129}; multiplies the scaled
 * Div output against the 8-bit converted Reshape_1 tensor (attention-score-like layout). */
static ModelError_t
addNode__sentence_encoder_attn_encoder_attn_layers_0_MatMul(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_MatMul */
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_MatMul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Div_output_0", "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8" };
  uint32_t
dimensions__sentence_encoder_attn_encoder_attn_layers_0_MatMul_output_0[] = {1, 2, 129, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000606089743087f, .offset= -14380}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_MatMul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_MatMul", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_MatMul, // Node Params 2, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor__sentence_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw[] = {1, 1, 32, 257}; VALIDATE(model.addTensor("_sentence_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0063549424521625f, .offset= -150}}},
.rank= 4,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw,
.memType= QNN_TENSORMEMTYPE_RAW,
// Static tensor data lives in the model binary blob (see BINVARSTART/BINLEN macros).
{.clientBuf= { .data=BINVARSTART(_sentence_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw), .dataSize=BINLEN(_sentence_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// Adds MatMul node "_sentence_encoder_attn_encoder_attn_layers_0_MatMul_1":
// multiplies the Div output {1,2,129,32-ish per graph} by the static
// Transpose_3 table {1,1,32,257}, producing {1, 2, 129, 257} uint16 output.
// Generated code — quantization scale/offset are calibration results.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_MatMul_1 */
// No operand transposition.
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1[] = {
"_sentence_encoder_attn_encoder_attn_layers_0_Div_output_0",
"_sentence_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0[] = {1, 2, 129, 257};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= {
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000194863841898f, .offset= -50758}}},
.rank= 4,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_MatMul_1", // Node Name
"qti.aisw", // Package Name
"MatMul", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1, // Node Params
2, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds a Transpose node converting MatMul_1's output from NCHW {1,2,129,257}
// to NHWC {1,129,257,2} via perm = {0, 2, 3, 1}. Inserted by the converter's
// axes-to-spatial-first-order pass; quantization params pass through unchanged.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f,
.offset= 0}}},
.rank= 1,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
// dataSize = 4 uint32 elements * 4 bytes.
{.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc_perm, .dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc[] = {
"_sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc[] = {1, 129, 257, 2};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
// Same encoding as the transpose input — layout changes don't alter quantization.
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000194863841898f, .offset= -50758}}},
.rank= 4,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds Pad node "_sentence_encoder_attn_encoder_attn_layers_0_Pad_1":
// constant-pads axis 2 by one trailing element ({1,129,257,2} -> {1,129,258,2})
// with pad value 0 (scheme 0 = constant padding).
// NOTE(review): Pad_1 -> Reshape_7 -> Pad_2 -> Reshape_10 -> Slice_0 below
// looks like the standard relative-position "skew" trick; confirm against
// the original ONNX graph before modifying any of these shapes.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Pad_1 */
// pad_amount is a {4,2} table of (before, after) pads per axis; only axis 2
// gets 1 trailing pad.
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_pad_amount[] = {4, 2};
uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Pad_1_pad_amount[] = {0, 0, 0, 0, 0, 1, 0, 0};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Pad_1[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_1_pad_amount",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 2,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_pad_amount,
.memType= QNN_TENSORMEMTYPE_RAW,
// dataSize = 8 uint32 elements * 4 bytes.
{.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Pad_1_pad_amount, .dataSize=32}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_1[] = {
"_sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc"
};
uint32_t
dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0[] = {1, 129, 258, 2};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_1[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
// Pad preserves the input's quantization encoding.
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000194863841898f, .offset= -50758}}},
.rank= 4,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_Pad_1", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_0_Pad_1, // Node Params
3, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_1, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds a Transpose node converting Pad_1's output back from NHWC {1,129,258,2}
// to NCHW {1,2,129,258} via perm = {0, 3, 1, 2}.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
// dataSize = 4 uint32 elements * 4 bytes.
{.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw_perm, .dataSize=16}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw[] = {
"_sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw[] = {1, 2, 129, 258};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000194863841898f, .offset= -50758}}},
.rank= 4,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds Reshape node "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_7":
// flattens {1,2,129,258} to {1, 2, 33282} (129 * 258 = 33282).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Reshape_7 */
const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7[] = {
"_sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0[] = {1, 2, 33282};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000194863841898f, .offset= -50758}}},
.rank= 3,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_Reshape_7", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds a Transpose node converting Reshape_7's output {1,2,33282} to
// feature-last layout {1,33282,2} via perm = {0, 2, 1}.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc_perm[] = {3};
uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
// dataSize = 3 uint32 elements * 4 bytes.
{.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc_perm, .dataSize=12}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc[] = {
"_sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc[] = {1,
33282, 2};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000194863841898f, .offset= -50758}}},
.rank= 3,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds Pad node "_sentence_encoder_attn_encoder_attn_layers_0_Pad_2":
// constant-pads axis 1 by 128 trailing zeros ({1,33282,2} -> {1,33410,2};
// 33282 + 128 = 33410 = 130 * 257, consumed by Reshape_10 below).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Pad_2 */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_pad_amount[] = {3, 2};
uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Pad_2_pad_amount[] = {0, 0, 0, 128, 0, 0};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Pad_2[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_2_pad_amount",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 2,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_pad_amount,
.memType= QNN_TENSORMEMTYPE_RAW,
// dataSize = 6 uint32 elements * 4 bytes.
{.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Pad_2_pad_amount, .dataSize=24}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_2[] = {
"_sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0[] = {1, 33410, 2};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_2[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000194863841898f, .offset= -50758}}},
.rank= 3,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_Pad_2", // Node Name
"qti.aisw", // Package Name
"Pad", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_0_Pad_2, // Node Params
3, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_2, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds a Transpose node converting Pad_2's output {1,33410,2} back to
// channel-first layout {1,2,33410} via perm = {0, 2, 1}.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf_perm[] = {3};
uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="perm",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf_perm",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UINT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf_perm,
.memType= QNN_TENSORMEMTYPE_RAW,
// dataSize = 3 uint32 elements * 4 bytes.
{.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf_perm, .dataSize=12}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf[] = {
"_sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf[] = {1, 2, 33410};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000194863841898f, .offset= -50758}}},
.rank= 3,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds Reshape node "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_10":
// {1,2,33410} -> {1, 2, 130, 257} (130 * 257 = 33410), feeding Slice_0 below.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_10(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Reshape_10 */
const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_10[] = {
"_sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_10_output_0[] = {1, 2, 130, 257};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_10[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_10_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000194863841898f, .offset= -50758}}},
.rank= 4,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_10_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_Reshape_10", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_10, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_10, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds StridedSlice node "Slice_0": from Reshape_10's {1,2,130,257} takes
// [0:1, 0:2, 0:129, 128:257] with stride 1 on every axis, yielding
// "_v_551" {1, 2, 129, 129}. ranges is a {4,3} table of (begin, end, stride).
static ModelError_t addNode_Slice_0(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR Slice_0 */
uint32_t dimensions_Slice_0_ranges[] = {4, 3};
int32_t Slice_0_ranges[] = {0, 1, 1, 0, 2, 1, 0, 129, 1, 128, 257, 1};
Qnn_Param_t params_Slice_0[] = {
{.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges",
{.tensorParam=(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "Slice_0_ranges",
.type=
QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_INT_32,
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
.rank= 2,
.dimensions=dimensions_Slice_0_ranges,
.memType= QNN_TENSORMEMTYPE_RAW,
// dataSize = 12 int32 elements * 4 bytes.
{.clientBuf= { .data=(uint8_t*)Slice_0_ranges, .dataSize=48}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}},
// All masks zero: begin/end taken literally from ranges, no new or squeezed axes.
{.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs_Slice_0[] = {
"_sentence_encoder_attn_encoder_attn_layers_0_Reshape_10_output_0"
};
uint32_t dimensions__v_551[] = {1, 2, 129, 129};
Qnn_Tensor_t outputs_Slice_0[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_v_551",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000194863841898f, .offset= -50758}}},
.rank= 4,
.dimensions=dimensions__v_551,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"Slice_0", // Node Name
"qti.aisw", // Package Name
"StridedSlice", // Qnn Node Type
params_Slice_0, // Node Params
5, // Num Node Params
inputs_Slice_0, // Input Tensor Names
1, // Num Input Tensor Names
outputs_Slice_0, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds ElementWiseBinary node "_sentence_encoder_attn_encoder_attn_layers_0_Add_2":
// combines the MatMul output and the sliced tensor "_v_551" (both {1,2,129,129}).
// operation = 0; per the node name this is the ADD member of the
// ElementWiseBinary operation enum in QnnOpDef.h — verify against the SDK header.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Add_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Add_2 */
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Add_2[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Add_2[] = {
"_sentence_encoder_attn_encoder_attn_layers_0_MatMul_output_0",
"_v_551"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Add_2_output_0[] = {1, 2, 129, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Add_2[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Add_2_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000686029598000f, .offset= -19230}}},
.rank= 4,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Add_2_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_0_Add_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_0_Add_2, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_0_Add_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_0_Add_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds the static scalar-like tensor
// "_sentence_encoder_attn_encoder_attn_layers_0_Constant_88_output_0"
// (uint16, rank 1, {1}; value stored in the binary blob). With offset -65535 it
// dequantizes near the most negative representable value — presumably the
// masked-attention fill constant; confirm against the ONNX source.
static ModelError_t addTensor__sentence_encoder_attn_encoder_attn_layers_0_Constant_88_output_0(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Constant_88_output_0[] = {1};
VALIDATE(model.addTensor("_sentence_encoder_attn_encoder_attn_layers_0_Constant_88_output_0", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_sentence_encoder_attn_encoder_attn_layers_0_Constant_88_output_0",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1525902152061462f, .offset= -65535}}},
.rank= 1,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Constant_88_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(_sentence_encoder_attn_encoder_attn_layers_0_Constant_88_output_0), .dataSize=BINLEN(_sentence_encoder_attn_encoder_attn_layers_0_Constant_88_output_0)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// Adds ElementWiseSelect node "_sentence_encoder_attn_encoder_attn_layers_0_Where":
// select(condition, Constant_88, Add_2_output). NOTE(review): the condition
// tensor comes from attn_layers_1 ("..._layers_1_Cast_5_output_0") — presumably
// the converter deduplicated an identical mask shared across layers; confirm
// before renaming or pruning that tensor.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Where(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Where */
const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Where[] = {
"_sentence_encoder_attn_encoder_attn_layers_1_Cast_5_output_0",
"_sentence_encoder_attn_encoder_attn_layers_0_Constant_88_output_0", "_sentence_encoder_attn_encoder_attn_layers_0_Add_2_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Where_output_0[] = {1, 2, 129, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Where[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Where_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1526386886835098f, .offset= -65514}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Where_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Where", // Node Name "qti.aisw", // Package Name "ElementWiseSelect", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Where, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Where, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Softmax(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Softmax */ Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Softmax[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta", 
// [generated] Continuation of addNode for ..._Softmax: scalar params axis=3, beta=1.0f; one input
// (the Where output), one 4D output {1,2,129,129} with scale 2^-16 (0.0000152587890625, offset 0 — the
// canonical u16 encoding for a softmax in [0,1]); then the addNode call (op "Softmax", 2 params).
// The line ends inside the next function, the Transpose that permutes the softmax output to NHWC.
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Softmax[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Where_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0[] = {1, 2, 129, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Softmax[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Softmax", // Node Name "qti.aisw", // Package Name "Softmax", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_Softmax, // Node Params 2, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Softmax, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Softmax, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc */ uint32_t 
// [generated] Body of the NCHW->NHWC Transpose of the softmax output: static "perm" tensor param
// {0,2,3,1} (4 x uint32 => dataSize 16), output dims {1,129,129,2}; quant encoding is carried through
// unchanged from the softmax (scale 2^-16, offset 0), as expected for a pure layout permute.
dimensions__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc_perm[] = {4}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc[] = {1, 129, 129, 2}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, 
// [generated] End of the NHWC Transpose (addNode with op "Transpose", 1 param), then the start of
// addNode for ..._MatMul_2: scalar bool params transpose_in0=0, transpose_in1=0; its second input is a
// pre-converted UFIXED_POINT_8 tensor produced elsewhere in the graph. Note MatMul_2 consumes the
// original NCHW softmax output, not the NHWC copy built above.
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_MatMul_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_MatMul_2 */ Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_MatMul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_2[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0", "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_MatMul_2_output_0[] = {1, 2, 129, 32}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
// [generated] End of MatMul_2: output {1,2,129,32} u16, addNode (op "MatMul", 2 params, 2 inputs).
// Then the start of addNode for ..._Pad_3: "pad_amount" is a {4,2} uint32 tensor
// {0,0, 0,0, 0,128, 0,0} — pads 128 trailing elements on axis 2 of the NHWC softmax copy.
"_sentence_encoder_attn_encoder_attn_layers_0_MatMul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000491046557727f, .offset= -32975}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_MatMul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_MatMul_2", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_MatMul_2, // Node Params 2, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Pad_3 */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_pad_amount[] = {4, 2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Pad_3_pad_amount[] = {0, 0, 0, 0, 0, 128, 0, 0}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Pad_3[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_3_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, 
// [generated] Rest of Pad_3's params (pad_amount dataSize 32 = 8 x uint32; pad_constant_value=0.0f;
// scheme=0 i.e. constant pad) and its output tensor {1,129,257,2} (129+128=257 on axis 2), same 2^-16
// quant encoding as its input. Ends just before the addNode name argument (continues on next line).
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Pad_3_pad_amount, .dataSize=32}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_3[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0[] = {1, 129, 257, 2}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
// [generated] Pad_3's addNode call (op "Pad", 3 params), then the start of the NHWC->NCHW Transpose
// of the padded tensor: static perm param {0,3,1,2}, dataSize 16.
"_sentence_encoder_attn_encoder_attn_layers_0_Pad_3", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_Pad_3, // Node Params 3, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_3, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw_perm[] = {4}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw[] = { 
// [generated] Rest of the NCHW Transpose: input = Pad_3 output, output {1,2,129,257}, addNode
// (op "Transpose"). Then the head of addNode for ..._Reshape_13, which flattens that tensor.
"_sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw[] = {1, 2, 129, 257}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Reshape_13 */ const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw" }; uint32_t 
// [generated] Reshape_13 flattens {1,2,129,257} -> {1,2,33153} (129*257 = 33153); no params
// (op "Reshape"). Then the head of the NCF->NFC Transpose of the reshaped tensor (perm {0,2,1}).
dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0[] = {1, 2, 33153}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_13", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc_perm[] = {3}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", 
// [generated] Rest of the nfc Transpose: rank-1 perm tensor (3 x uint32 => dataSize 12), input =
// Reshape_13 output, output {1,33153,2}; addNode call continues on the next line.
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc[] = {1, 33153, 2}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
// [generated] addNode call for the nfc Transpose, then the start of addNode for ..._Pad_4:
// pad_amount is a {3,2} uint32 tensor {0,0, 129,0, 0,0} — pads 129 leading elements on axis 1
// (33153 + 129 = 33282).
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Pad_4 */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_pad_amount[] = {3, 2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Pad_4_pad_amount[] = {0, 0, 129, 0, 0, 0}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Pad_4[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_4_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Pad_4_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", 
// [generated] Rest of Pad_4: scalar params pad_constant_value=0.0f, scheme=0 (constant pad), input =
// the nfc Transpose output, output {1,33282,2}; addNode (op "Pad", 3 params). The line ends inside the
// next function, the NFC->NCF Transpose of Pad_4's output.
{.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_4[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0[] = {1, 33282, 2}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Pad_4", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_Pad_4, // Node Params 3, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_4, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
// [generated] Body of the NFC->NCF Transpose of Pad_4's output: static perm {0,2,1} (dataSize 12),
// output dims {1,2,33282}, same 2^-16 quant encoding carried through.
_sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf_perm[] = {3}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf[] = {1, 2, 33282}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 3, 
// [generated] ncf Transpose addNode (op "Transpose"), then the head of addNode for ..._Reshape_16,
// which reinterprets {1,2,33282} as {1,2,129,258} (33282 = 129*258 — the classic relative-position
// skewing trick: pad + reshape shifts each row by one).
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_16(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Reshape_16 */ const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_16[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_16_output_0[] = {1, 2, 129, 258}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_16[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_16_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Reshape_16_output_0, .memType= QNN_TENSORMEMTYPE_RAW, 
// [generated] End of Reshape_16 (addNode, op "Reshape", no params), then the start of addNode for
// ..._Slice_8: a StridedSlice whose "ranges" tensor is {4,3} int32 triplets (begin,end,stride) =
// {0,1,1, 0,2,1, 0,129,1, 1,258,1} — keeps everything but drops index 0 of the last axis (258 -> 257).
{.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_16", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_16, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_16, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Slice_8(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Slice_8 */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Slice_8_ranges[] = {4, 3}; int32_t _sentence_encoder_attn_encoder_attn_layers_0_Slice_8_ranges[] = {0, 1, 1, 0, 2, 1, 0, 129, 1, 1, 258, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Slice_8[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Slice_8_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Slice_8_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Slice_8_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
// [generated] Rest of Slice_8: all four mask scalars (begin_mask, end_mask, new_axes_mask,
// shrink_axes) are 0; input = Reshape_16 output, output {1,2,129,257}; addNode header for
// op "StridedSlice" with 5 params (call continues on the next line).
.numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Slice_8[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_16_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Slice_8_output_0[] = {1, 2, 129, 257}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Slice_8[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Slice_8_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Slice_8_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Slice_8", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_Slice_8, // Node Params 5, // Num Node Params 
// [generated] Tail of Slice_8's addNode, then addTensor for the static weight
// ..._Unsqueeze_41_output_0_nchw: {1,1,257,32} UFIXED_POINT_8 (scale 0.0059074..., offset -150),
// data from the weight blob via BINVARSTART/BINLEN. Then the start of addNode for ..._MatMul_3
// (transpose_in0/in1 both false).
inputs__sentence_encoder_attn_encoder_attn_layers_0_Slice_8, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Slice_8, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor__sentence_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw[] = {1, 1, 257, 32}; VALIDATE(model.addTensor("_sentence_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0059074447490275f, .offset= -150}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_sentence_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw), .dataSize=BINLEN(_sentence_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_MatMul_3(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_MatMul_3 */ Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_MatMul_3[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, 
// [generated] Rest of MatMul_3: inputs = Slice_8 output {1,2,129,257} x the static {1,1,257,32}
// weight above, output {1,2,129,32} u16; addNode (op "MatMul", 2 params, 2 inputs). The line ends
// inside the next function, addNode for ..._Add_4.
{.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_3[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Slice_8_output_0", "_sentence_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_MatMul_3_output_0[] = {1, 2, 129, 32}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_3[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_MatMul_3_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000066684133344f, .offset= -25373}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_MatMul_3_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_MatMul_3", // Node Name "qti.aisw", // Package Name "MatMul", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_MatMul_3, // Node Params 2, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_3, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_MatMul_3, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Add_4(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_sentence_encoder_attn_encoder_attn_layers_0_Add_4 */ Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Add_4[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Add_4[] = { "_sentence_encoder_attn_encoder_attn_layers_0_MatMul_2_output_0", "_sentence_encoder_attn_encoder_attn_layers_0_MatMul_3_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Add_4_output_0[] = {1, 2, 129, 32}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Add_4[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Add_4_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000534894352313f, .offset= -33010}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Add_4_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Add_4", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_Add_4, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Add_4, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Add_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
addNode__sentence_encoder_attn_encoder_attn_layers_0_Transpose_9(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Transpose_9 */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_9_perm[] = {4}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_Transpose_9_perm[] = {0, 1, 3, 2}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_Transpose_9[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_9_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_9_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_Transpose_9_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose_9[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Add_4_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_9_output_0[] = {1, 2, 32, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose_9[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_9_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0000534894352313f, .offset= -33010}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_Transpose_9_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_9", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_Transpose_9, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose_9, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Transpose_9, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_19(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_Reshape_19 */ const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_19[] = { "_sentence_encoder_attn_encoder_attn_layers_0_Transpose_9_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_19[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000534894352313f, .offset= -33010}}}, .rank= 4, 
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_Reshape_19", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_19, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_Reshape_19, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc_perm, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000534894352313f, .offset= -33010}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names 
outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_weight[] = {1, 1, 64, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0030226984526962f, .offset= -124}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012924538459629f, .offset= -126}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_pad_amount[] = {2, 2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_stride[] = {2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 
0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_weight", "tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_bias" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002539426204748f, .offset= -38958}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type 
params__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d, // Node Params 5, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw[] = { 
"_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002539426204748f, .offset= -38958}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate */ const char* 
inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0[] = {1, 64, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002539426204748f, .offset= -38958}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc_perm[] = {3}; 
uint32_t _sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc[] = { "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002539426204748f, .offset= -38958}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_Add */ Qnn_Param_t params__sentence_encoder_attn_encoder_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_Add[] = { "_sentence_encoder_attn_encoder_Mul_1_output_0", "_sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc" }; uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_1_0_Transpose_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_norm_layers_1_0_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002514841035008f, .offset= -45214}}}, .rank= 3, 
.dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_1_0_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_attn_encoder_Add, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_weight[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029554630164057f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, 
.numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012491715606302f, .offset= -119}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization */ uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization_axes[] = {1}; uint32_t _sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization_axes[] = {2}; Qnn_Param_t params__sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= 
"_sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// scalar param: epsilon = 1e-6 (FLOAT_32)
{.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
// inputs: the transposed activation, then the static gamma (weight) and
// beta (bias) tensors registered elsewhere in this file
const char* inputs__sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization[] = { "_sentence_encoder_attn_encoder_norm_layers_1_0_Transpose_output_0", "tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_weight", "tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_bias" };
uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_1_0_Transpose_1_output_0[] = {1, 129, 64};
// output: [1,129,64] UFIXED_POINT_16 activation, scale 0.0000804976589279 / offset -46195
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_norm_layers_1_0_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000804976589279f, .offset= -46195}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_1_0_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
// register the qti.aisw LayerNorm op: 2 params (axes, epsilon), 3 inputs, 1 output
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Converted from the ONNX Mul at ffn_layers[0]: ElementWiseBinary with
// operation code 13 (multiply — see the ELTWISE enum in QnnOpDef.h),
// combining the LayerNorm output with "_sentence_encoder_Concat_2_output_0"
// (presumably a sequence-mask-like tensor — verify against the Concat node).
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_Mul */
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul[] = { "_sentence_encoder_attn_encoder_norm_layers_1_0_Transpose_1_output_0", "_sentence_encoder_Concat_2_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0[] = {1, 129, 64};
// output keeps the input's quantization (same scale/offset as the LayerNorm output)
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000804976589279f, .offset= -46195}}}, .rank= 3,
.dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
// register the elementwise multiply: 1 param (operation), 2 inputs, 1 output
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_Mul, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Layout change feature-last -> channel-first ("ncf"): Transpose with
// perm {0,2,1}, taking the [1,129,64] Mul output to [1,64,129] so the
// following 1-D conv (run as Conv2d) sees channels on axis 1.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf */
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf_perm[] = {3};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf[] = {1, 64, 129};
// output: [1,64,129], quantization unchanged (pure layout op)
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000804976589279f, .offset= -46195}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

static ModelError_t
// Rank lift for the converted 1-D conv: Reshape [1,64,129] -> [1,64,1,129]
// (insert a height-1 axis) so conv_1 can execute as a Conv2d.
addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d */
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000804976589279f, .offset= -46195}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
// Reshape takes no params (nullptr / 0)
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Layout change for the conv: Transpose perm {0,2,3,1} (NCHW -> NHWC),
// [1,64,1,129] -> [1,1,129,64]; QNN Conv2d consumes channel-last input.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000804976589279f, .offset= -46195}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the static conv_1 weight tensor: dims {1,1,64,256}
// (1x1 kernel; 64 matches the input channel count, 256 the conv output
// channel count declared below), UFIXED_POINT_8, data from the model binary.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_weight[] = {1, 1, 64, 256};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0045991879887879f, .offset= -127}}}, .rank= 4,
.dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Registers the static conv_1 bias vector: 256 elements, UFIXED_POINT_8
// (scale 0.0015277531929314, offset -232), data from the model binary.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_bias[] = {256};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0015277531929314f, .offset= -232}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Conv2d realizing the original 1-D conv_1: dilation {1,1}, pad_amount
// {{0,0},{0,0}}, stride {1,1}, group 1 (dense, un-grouped 1x1 convolution
// over the NHWC input prepared above).
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d */
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_dilation[] = {2};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_stride[] = {2};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
// pad_amount is a 2x2 tensor: {before,after} per spatial axis, all zero here
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
{.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
// inputs: NHWC activation, static weight, static bias
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_weight", "tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_bias" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate[] = {1, 1, 129, 256};
Qnn_Tensor_t
// Conv2d output: [1,1,129,256] UFIXED_POINT_16, new quantization
// (scale 0.0000730934843887 / offset -37651)
outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000730934843887f, .offset= -37651}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
// register Conv2d: 5 params, 3 inputs (activation/weight/bias), 1 output
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d, // Node Params
5, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Undo the conv layout change: Transpose perm {0,3,1,2} (NHWC -> NCHW),
// [1,1,129,256] -> [1,256,1,129]; quantization carried through unchanged.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw */
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw[] = {1, 256, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000730934843887f, .offset= -37651}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Rank drop back to 3-D: Reshape [1,256,1,129] -> [1,256,129], producing the
// original ONNX conv_1 output tensor name.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate */
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_output_0[] = {1, 256, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000730934843887f, .offset= -37651}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced=
0}}} };
// register the no-param Reshape dropping the height-1 axis
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Converted from the ONNX Relu: ElementWiseNeuron with operation code 4
// (the neuron-op enum value the converter emits for RELU — see QnnOpDef.h).
// Output quantization offset is 0, consistent with a non-negative range.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_Relu(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_Relu */
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_Relu[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 4}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_Relu[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0[] = {1, 256, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_Relu[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000311004587275f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_Relu", // Node Name
"qti.aisw", // Package Name
"ElementWiseNeuron", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_Relu, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_Relu, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_Relu, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Layout change channel-first -> feature-last ("nfc"): Transpose perm {0,2,1},
// [1,256,129] -> [1,129,256], ahead of the second masking multiply.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc */
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc_perm[] = {3};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc[] = {1, 129, 256};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000311004587275f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Second masking multiply (ONNX Mul_1): ElementWiseBinary, operation 13
// (multiply), again against "_sentence_encoder_Concat_2_output_0".
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_Mul_1 */
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char*
inputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc", "_sentence_encoder_Concat_2_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0[] = {1, 129, 256};
// output keeps the Relu-side quantization (scale 0.0000311004587275, offset 0)
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000311004587275f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_Mul_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Layout change for the next conv: Transpose perm {0,2,1},
// [1,129,256] -> [1,256,129] (feature-last -> channel-first).
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf */
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf_perm[] = {3};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf[] = {1, 256, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000311004587275f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// -- head of addNode__…conv_2_Conv_reshape_to_2d (definition continues past
//    this chunk): Reshape [1,256,129] -> [1,256,1,129] preparing conv_2.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d */
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d[] = {1, 256, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000311004587275f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
.hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Adds Transpose "..._conv_2_Conv_reshape_to_2d_nhwc": perm {0, 2, 3, 1} converts the
// NCHW-shaped [1, 256, 1, 129] tensor to NHWC layout [1, 1, 129, 256] for Conv2d.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc */
// Static "perm" tensor: rank 1, 4 uint32 entries (dataSize = 16 bytes).
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 256};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000311004587275f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Registers the static conv_2 weight tensor: 8-bit asymmetric-quantized, dims
// [1, 1, 256, 64]; raw bytes come from the linked model binary via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_weight[] = {1, 1, 256, 64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0042993673123419f, .offset= -133}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Registers the static conv_2 bias tensor: 8-bit asymmetric-quantized, 64 elements.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_bias[] = {64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017581548308954f, .offset= -140}}}, .rank= 1,
.dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Adds Conv2d "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d":
// 1x1-filter convolution (stride {1,1}, dilation {1,1}, zero padding, group=1) over the
// NHWC input [1, 1, 129, 256] with the static weight/bias above, producing [1, 1, 129, 64].
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d */
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_dilation[] = {2};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_dilation[] = {1, 1};
// pad_amount is a rank-2 {2, 2} tensor, all zeros here (per-spatial-dim before/after
// padding — NOTE(review): confirm layout against the QNN Conv2d op definition).
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_pad_amount[] = {2, 2};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_stride[] = {2};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
// Inputs: activation, then static weight and bias registered by the addTensor helpers.
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_weight", "tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_bias" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate[] = {1, 1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001242895814357f, .offset= -34660}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d, // Node Params
5, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Adds Transpose "..._conv_2_Conv_intermediate_nchw": perm {0, 3, 1, 2} converts the
// NHWC conv result [1, 1, 129, 64] back to NCHW [1, 64, 1, 129].
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw */
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001242895814357f, .offset= -34660}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Adds Reshape "..._conv_2_Conv_intermediate": drops the dummy height axis,
// [1, 64, 1, 129] -> [1, 64, 129], undoing the Conv2d lowering.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate */
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0[] = {1, 64, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001242895814357f, .offset= -34660}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Adds Transpose "..._conv_2_Conv_output_0_nfc": perm {0, 2, 1},
// [1, 64, 129] -> [1, 129, 64] (back to batch, frames, features order).
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc */
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc_perm[] = {3};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001242895814357f, .offset= -34660}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Adds ElementWiseBinary "_sentence_encoder_attn_encoder_ffn_layers_0_Mul_2" with
// operation=13 on the conv output and "_sentence_encoder_Concat_2_output_0".
// NOTE(review): 13 was emitted for the ONNX Mul op — confirm the code against QnnOpDef.h.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_0_Mul_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_0_Mul_2 */
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_0_Mul_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_2[] = { "_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc", "_sentence_encoder_Concat_2_output_0" };
// Output reuses the conv output's quantization encoding; shape [1, 129, 64].
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_2_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_0_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001242895814357f, .offset= -34660}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_0_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_0_Mul_2", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_0_Mul_2, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_0_Mul_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Adds ElementWiseBinary "_sentence_encoder_attn_encoder_Add_1" with operation=0
// (emitted for the ONNX Add — confirm against QnnOpDef.h), summing the
// norm_layers_1 branch output with the Mul_2 output; result shape [1, 129, 64].
static ModelError_t addNode__sentence_encoder_attn_encoder_Add_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_Add_1 */
Qnn_Param_t params__sentence_encoder_attn_encoder_Add_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__sentence_encoder_attn_encoder_Add_1[] = { "_sentence_encoder_attn_encoder_norm_layers_1_0_Transpose_1_output_0", "_sentence_encoder_attn_encoder_ffn_layers_0_Mul_2_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_Add_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001296999253100f, .offset= -31165}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo=
{.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_Add_1", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_attn_encoder_Add_1, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_Add_1, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_Add_1, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Registers the static LayerNorm scale (weight) tensor: 8-bit quantized, 64 elements.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_weight[] = {64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029495428316295f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Registers the static LayerNorm offset (bias) tensor: 8-bit quantized, 64 elements.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_bias[] = {64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0011040057288483f, .offset= -107}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Adds LayerNorm "..._norm_layers_2_0_norm_LayerNormalization": normalizes the
// [1, 129, 64] input over its last axis (axes = {2}) with epsilon 1e-6, using the
// weight/bias tensors registered above as the three node inputs.
static ModelError_t addNode__sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization */
// Static "axes" tensor: rank 1, single uint32 entry (dataSize = 4 bytes).
uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization_axes[] = {1};
uint32_t _sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
const char* inputs__sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization[] = { "_sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_output_0", "tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_weight", "tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_bias" };
uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000906407585717f, .offset= -37163}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Adds Transpose "..._norm_layers_2_0_Transpose_1_output_0_ncf": perm {0, 2, 1},
// [1, 129, 64] -> [1, 64, 129], feeding the next attention-layer conv lowering.
static ModelError_t addNode__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf */
uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf_perm[] = {3};
uint32_t _sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf[] = { "_sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf[] = {1, 64, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000906407585717f, .offset= -37163}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d */ const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d[] = { "_sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d[] = {1, 64, 1, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000906407585717f, .offset= -37163}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR 
_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000906407585717f, .offset= -37163}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_weight[] = {1, 1, 64, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028860257007182f, .offset= -111}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_weight, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012170282425359f, .offset= -128}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_dilation[] = {2}; uint32_t 
_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_pad_amount[] = {2, 2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_stride[] = {2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_stride[] = {1, 1};
/* Conv2d params: dilation/stride are rank-1 {2} uint32 tensors (8 bytes each); pad_amount is a
 * rank-2 {2,2} tensor (16 bytes, all zeros); plus scalar group=1 and reuse_sparse_indices=false. */
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d[] = { "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_weight", "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_bias" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate[] = {1, 1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000692777903168f, .offset= -34481}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d, // Node Params
5, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose {0,3,1,2}: NHWC {1,1,129,64} back to NCHW {1,64,1,129} after the Conv2d. */
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw_perm[] = {4}; uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw[] = { "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000692777903168f, .offset= -34481}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Reshape {1,64,1,129} -> {1,2,32,129}: splits the 64 channels into 2 heads x 32 dims
 * (the ONNX "_Reshape" ahead of the attention), keeping the conv_q quantization encoding. */
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate */
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate[] = { "_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_output_0[] = {1, 2, 32, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000692777903168f, .offset= -34481}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* conv_k branch mirrors conv_q exactly (same {1,64,1,129} rank-4 lift from the shared
 * LayerNorm output), differing only in weights/bias and output quantization encodings. */
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d */
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d[] = { "_sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000906407585717f, .offset= -37163}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err); return err; }
/* Transpose {0,2,3,1}: NCHW -> NHWC for the conv_k Conv2d, same as the conv_q path. */
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc_perm[] = {4}; uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions=
0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000906407585717f, .offset= -37163}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t 
dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_weight[] = {1, 1, 64, 64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0027919667772949f, .offset= -120}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010253988439217f, .offset= -126}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_bias, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_dilation[] = {2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_dilation[] = {1, 1}; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_pad_amount[] = {2, 2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_stride[] = {2}; uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_stride[] = {1, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, 
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, 
{.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d[] = { "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_weight", "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_bias" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate[] = {1, 1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000561706765438f, .offset= -32629}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d, // Node Params 5, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d, // Input Tensor Names 3, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t 
// Transpose NHWC {1,1,129,64} -> NCHW {1,64,1,129} after conv_k's Conv2d (perm {0,3,1,2}).
// Quantization encoding is carried through unchanged (Transpose is data-invariant).
addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
// Static "perm" tensor param; clientBuf points at the 4 x uint32 array above (dataSize = 16 bytes).
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw[] = { "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw[] = {1, 64, 1, 129};
// Output keeps the producer's UFIXED_POINT_16 scale/offset.
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000561706765438f, .offset= -32629}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */ "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw", /* Node Name */ "qti.aisw", /* Package Name */ "Transpose", /* Qnn Node Type */ params__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw, /* Node Params */ 1, /* Num Node Params */ inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw, /* Input Tensor Names */ 1, /* Num Input Tensor Names */ outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw, /* Output Tensors */ 1/* Num Output Tensors */ ), err);
return err; }
// Reshape NCHW {1,64,1,129} -> multi-head layout {1,2,32,129} (2 heads x 32 channels).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate */
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate[] = { "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0[] = {1, 2, 32, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
// Completion of the conv_k Reshape node ({1,2,32,129} output, 16-bit), then a Convert node
// that re-quantizes that tensor from UFIXED_POINT_16 to UFIXED_POINT_8 for a downstream 8-bit consumer.
.id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000561706765438f, .offset= -32629}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */ "_sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate", /* Node Name */ "qti.aisw", /* Package Name */ "Reshape", /* Qnn Node Type */ nullptr, /* Node Params */ 0, /* Num Node Params */ inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate, /* Input Tensor Names */ 1, /* Num Input Tensor Names */ outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate, /* Output Tensors */ 1/* Num Output Tensors */ ), err);
return err; }
// Convert 16-bit -> 8-bit quantized copy of Reshape_1 output; both dynamic_* flags are false
// (static quantization parameters on both sides).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8 */
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="dynamic_input_data", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="dynamic_output_data", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8[] = {1, 2, 32, 129};
// 8-bit output with its own (coarser) scale/offset.
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0143796931952238f, .offset= -127}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */ "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8", /* Node Name */ "qti.aisw", /* Package Name */ "Convert", /* Qnn Node Type */ params__sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8, /* Node Params */ 2, /* Num Node Params */ inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8, /* Input Tensor Names */ 1, /* Num Input Tensor Names */ outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8, /* Output Tensors */ 1/* Num Output Tensors */ ), err);
return err; }
// Next builder: Reshape feeding the conv_v 1x1 convolution (body continues on the next line).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d(QnnModel&
// conv_v input pipeline: Reshape the normalized encoder output to {1,64,1,129} (NCHW-like),
// then Transpose perm {0,2,3,1} to NHWC {1,1,129,64} as required by the generated Conv2d.
model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d */
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d[] = { "_sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000906407585717f, .offset= -37163}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */ "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d", /* Node Name */ "qti.aisw", /* Package Name */ "Reshape", /* Qnn Node Type */ nullptr, /* Node Params */ 0, /* Num Node Params */ inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d, /* Input Tensor Names */ 1, /* Num Input Tensor Names */ outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d, /* Output Tensors */ 1/* Num Output Tensors */ ), err);
return err; }
// NCHW -> NHWC transpose in front of conv_v's Conv2d (perm {0,2,3,1}); quantization unchanged.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000906407585717f, .offset= -37163}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */ "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc", /* Node Name */ "qti.aisw", /* Package Name */ "Transpose", /* Qnn Node Type */ params__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc, /* Node Params */ 1, /* Num Node Params */ inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc, /* Input Tensor Names */ 1, /* Num Input Tensor Names */ outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc, /* Output Tensors */ 1/* Num Output Tensors */ ), err);
return err; }
// Static 8-bit quantized conv_v weight {1,1,64,64}; payload lives in the binary blob
// referenced via BINVARSTART/BINLEN (continues on the next line).
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_weight[] = {1, 1, 64, 64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_weight", /* Tensor Name */ (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040961876511574f, .offset= -110}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_weight, .memType=
// Completion of the conv_v weight tensor (static data from the weight binary), the matching
// 8-bit bias tensor {64}, and the opener of the conv_v Conv2d node builder.
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
// Static 8-bit quantized conv_v bias {64} (bias_bitwidth=8 per the converter flags in the header).
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_bias[] = {64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_bias", /* Tensor Name */ (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007947096019052f, .offset= -132}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err; }
// conv_v Conv2d builder: 1x1 convolution, stride/dilation {1,1}, no padding (arrays on next line).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_dilation[] = {2};
uint32_t
// conv_v Conv2d parameters: dilation {1,1}, pad_amount {{0,0},{0,0}}, stride {1,1},
// scalar group=1 and reuse_sparse_indices=false; then inputs and the intermediate output dims.
_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_pad_amount[] = {2, 2};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_stride[] = {2};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
 {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
 {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
 {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
 {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d[] = { "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_weight", "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_bias" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate[] = {1, 1, 129, 64};
Qnn_Tensor_t
// conv_v Conv2d output tensor ({1,1,129,64}, 16-bit) and node registration, followed by the
// NHWC->NCHW Transpose builder (perm {0,3,1,2}) that restores channel-first layout.
outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000755146247684f, .offset= -33884}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */ "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d", /* Node Name */ "qti.aisw", /* Package Name */ "Conv2d", /* Qnn Node Type */ params__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d, /* Node Params */ 5, /* Num Node Params */ inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d, /* Input Tensor Names */ 3, /* Num Input Tensor Names */ outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d, /* Output Tensors */ 1/* Num Output Tensors */ ), err);
return err; }
// Transpose NHWC {1,1,129,64} -> NCHW {1,64,1,129} after conv_v's Conv2d.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw[] = { "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000755146247684f, .offset= -33884}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
// Registers the conv_v NCHW Transpose, then the Reshape to multi-head layout {1,2,32,129}
// (tensor "_..._Reshape_2_output_0"), then opens the Q-path head-split Transpose builder.
.numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */ "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw", /* Node Name */ "qti.aisw", /* Package Name */ "Transpose", /* Qnn Node Type */ params__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw, /* Node Params */ 1, /* Num Node Params */ inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw, /* Input Tensor Names */ 1, /* Num Input Tensor Names */ outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw, /* Output Tensors */ 1/* Num Output Tensors */ ), err);
return err; }
// Reshape NCHW {1,64,1,129} -> {1,2,32,129} (2 heads x 32 channels) for the value path.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate */
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate[] = { "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_2_output_0[] = {1, 2, 32, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000755146247684f, .offset= -33884}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
.numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */ "_sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate", /* Node Name */ "qti.aisw", /* Package Name */ "Reshape", /* Qnn Node Type */ nullptr, /* Node Params */ 0, /* Num Node Params */ inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate, /* Input Tensor Names */ 1, /* Num Input Tensor Names */ outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate, /* Output Tensors */ 1/* Num Output Tensors */ ), err);
return err; }
// Transpose {1,2,32,129} -> {1,2,129,32} (perm {0,1,3,2}) on Reshape_output_0 (query-side path).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Transpose(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Transpose */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Transpose_perm[] = {0, 1, 3, 2};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Transpose[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Transpose_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose[] = {
// Completion of the query-side Transpose ({1,2,129,32} output), then the value-side
// Transpose_1 builder (same perm {0,1,3,2}) operating on Reshape_2_output_0.
"_sentence_encoder_attn_encoder_attn_layers_1_Reshape_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_output_0[] = {1, 2, 129, 32};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000692777903168f, .offset= -34481}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, /* Op_Config_t Version */ "_sentence_encoder_attn_encoder_attn_layers_1_Transpose", /* Node Name */ "qti.aisw", /* Package Name */ "Transpose", /* Qnn Node Type */ params__sentence_encoder_attn_encoder_attn_layers_1_Transpose, /* Node Params */ 1, /* Num Node Params */ inputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose, /* Input Tensor Names */ 1, /* Num Input Tensor Names */ outputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose, /* Output Tensors */ 1/* Num Output Tensors */ ), err);
return err; }
// Transpose_1: {1,2,32,129} -> {1,2,129,32} on the value path; quantization carried through.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Transpose_1 */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_perm[] = {0, 1, 3, 2};
Qnn_Param_t
params__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_2_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0[] = {1, 2, 129, 32};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000755146247684f, .offset= -33884}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions=
0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_1", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8 */ Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="dynamic_input_data", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="dynamic_output_data", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8[] = {1, 2, 129, 32}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { 
QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0193317439407110f, .offset= -132}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
// Register the Convert (requantize) node with the graph.
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                  "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8", // Node Name
                  "qti.aisw", // Package Name
                  "Convert", // Qnn Node Type
                  params__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8, // Node Params
                  2, // Num Node Params
                  inputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8, // Input Tensor Names
                  1, // Num Input Tensor Names
                  outputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8, // Output Tensors
                  1// Num Output Tensors
), err);
return err;
}

// ElementWiseBinary node (operation=2; divide, per the ONNX node name): combines the Transpose output with
// the layer-0 constant _sentence_encoder_attn_encoder_attn_layers_0_Constant_10_output_0; output {1, 2, 129, 32}.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Div(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Div */
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Div[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 2}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Div[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_output_0", "_sentence_encoder_attn_encoder_attn_layers_0_Constant_10_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Div_output_0[] = {1, 2, 129, 32};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Div[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Div_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000122466981338f, .offset= -34481}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Div_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                  "_sentence_encoder_attn_encoder_attn_layers_1_Div", // Node Name
                  "qti.aisw", // Package Name
                  "ElementWiseBinary", // Qnn Node Type
                  params__sentence_encoder_attn_encoder_attn_layers_1_Div, // Node Params
                  1, // Num Node Params
                  inputs__sentence_encoder_attn_encoder_attn_layers_1_Div, // Input Tensor Names
                  2, // Num Input Tensor Names
                  outputs__sentence_encoder_attn_encoder_attn_layers_1_Div, // Output Tensors
                  1// Num Output Tensors
), err);
return err;
}

// MatMul node: {1, 2, 129, 32} x (8-bit converted Reshape_1 output) -> {1, 2, 129, 129}; no operand transposes.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_MatMul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_MatMul */
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_MatMul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Div_output_0",
"_sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_MatMul_output_0[] = {1, 2, 129, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000396463256038f, .offset= -33195}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_MatMul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                  "_sentence_encoder_attn_encoder_attn_layers_1_MatMul", // Node Name
                  "qti.aisw", // Package Name
                  "MatMul", // Qnn Node Type
                  params__sentence_encoder_attn_encoder_attn_layers_1_MatMul, // Node Params
                  2, // Num Node Params
                  inputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul, // Input Tensor Names
                  2, // Num Input Tensor Names
                  outputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul, // Output Tensors
                  1// Num Output Tensors
), err);
return err;
}

// Static weight tensor: 8-bit quantized constant of shape {1, 1, 32, 257}, backed by data in the
// model binary (BINVARSTART/BINLEN); consumed below as the second operand of MatMul_1.
static ModelError_t addTensor__sentence_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw[] = {1, 1, 32, 257};
VALIDATE(model.addTensor("_sentence_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw", // Tensor Name
                  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0060451999306679f, .offset= -114}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(_sentence_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw), .dataSize=BINLEN(_sentence_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// MatMul node: Div output {1, 2, 129, 32} x static tensor {1, 1, 32, 257} -> {1, 2, 129, 257}; no operand transposes.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_MatMul_1 */
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Div_output_0", "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0[] = {1, 2, 129, 257};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000139439325721f, .offset= -30672}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                  "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_1", // Node Name
                  "qti.aisw", // Package Name
                  "MatMul", // Qnn Node Type
                  params__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1, // Node Params
                  2, // Num Node Params
                  inputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1, // Input Tensor Names
                  2, // Num Input Tensor Names
                  outputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1, // Output Tensors
                  1// Num Output Tensors
), err);
return err;
}

// Transpose node: permutes MatMul_1 output {1, 2, 129, 257} with perm {0, 2, 3, 1} -> {1, 129, 257, 2} (NCHW -> NHWC).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc[] = { "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc[] = {1, 129, 257, 2};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000139439325721f, .offset= -30672}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
// Register the layout-permuting Transpose node with the graph (quantization parameters unchanged).
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                  "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc", // Node Name
                  "qti.aisw", // Package Name
                  "Transpose", // Qnn Node Type
                  params__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc, // Node Params
                  1, // Num Node Params
inputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc, // Input Tensor Names
                  1, // Num Input Tensor Names
                  outputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc, // Output Tensors
                  1// Num Output Tensors
), err);
return err;
}

// Pad node: constant-pads (scheme 0, value 0.0) the NHWC tensor {1, 129, 257, 2} with one trailing
// element on axis 2, producing {1, 129, 258, 2}; pad_amount rows are {before, after} per axis.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Pad_1 */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_pad_amount[] = {4, 2};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Pad_1_pad_amount[] = {0, 0, 0, 0, 0, 1, 0, 0};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Pad_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_1_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Pad_1_pad_amount, .dataSize=32}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_1[] = {
"_sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0[] = {1, 129, 258, 2};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000139439325721f, .offset= -30672}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                  "_sentence_encoder_attn_encoder_attn_layers_1_Pad_1", // Node Name
                  "qti.aisw", // Package Name
                  "Pad", // Qnn Node Type
                  params__sentence_encoder_attn_encoder_attn_layers_1_Pad_1, // Node Params
                  3, // Num Node Params
                  inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_1, // Input Tensor Names
                  1, // Num Input Tensor Names
                  outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_1, // Output Tensors
                  1// Num Output Tensors
), err);
return err;
}

// Transpose node: permutes the padded tensor {1, 129, 258, 2} with perm {0, 3, 1, 2} -> {1, 2, 129, 258} (NHWC -> NCHW).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw[] = {1, 2, 129, 258};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000139439325721f, .offset= -30672}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                  "_sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw", // Node Name
                  "qti.aisw", // Package Name
                  "Transpose", // Qnn Node Type
                  params__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw, // Node Params
                  1, // Num Node Params
                  inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw, // Input Tensor Names
                  1, // Num Input Tensor Names
                  outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw, // Output Tensors
                  1// Num Output Tensors
), err);
return err;
}

// Reshape node: flattens {1, 2, 129, 258} to {1, 2, 33282} (129 * 258 = 33282); pure view change, no params.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Reshape_7 */
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0[] = {1, 2, 33282};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000139439325721f, .offset= -30672}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1,
// Op_Config_t Version
                  "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_7", // Node Name
                  "qti.aisw", // Package Name
                  "Reshape", // Qnn Node Type
                  nullptr, // Node Params
                  0, // Num Node Params
                  inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7, // Input Tensor Names
                  1, // Num Input Tensor Names
                  outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7, // Output Tensors
                  1// Num Output Tensors
), err);
return err;
}

// Transpose node: permutes {1, 2, 33282} with perm {0, 2, 1} -> {1, 33282, 2} (channel-last, "nfc" layout).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc_perm[] = {3};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc[] = {1, 33282, 2};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000139439325721f, .offset= -30672}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                  "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc", // Node Name
                  "qti.aisw", // Package Name
                  "Transpose", // Qnn Node Type
                  params__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc, // Node Params
                  1, // Num Node Params
                  inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc, // Input Tensor Names
                  1, // Num Input Tensor Names
                  outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc, // Output Tensors
                  1// Num Output Tensors
), err);
return err;
}

// Pad node: constant-pads (scheme 0, value 0.0) axis 1 with 128 trailing elements: {1, 33282, 2} -> {1, 33410, 2}.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Pad_2 */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_pad_amount[] = {3, 2};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Pad_2_pad_amount[] = {0, 0, 0, 128, 0, 0};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Pad_2[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_2_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Pad_2_pad_amount, .dataSize=24}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_2[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0[] = {1, 33410, 2};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000139439325721f, .offset= -30672}}}, .rank= 3,
.dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                  "_sentence_encoder_attn_encoder_attn_layers_1_Pad_2", // Node Name
                  "qti.aisw", // Package Name
                  "Pad", // Qnn Node Type
                  params__sentence_encoder_attn_encoder_attn_layers_1_Pad_2, // Node Params
                  3, // Num Node Params
                  inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_2, // Input Tensor Names
                  1, // Num Input Tensor Names
                  outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_2, // Output Tensors
                  1// Num Output Tensors
), err);
return err;
}

// Transpose node: permutes {1, 33410, 2} with perm {0, 2, 1} -> {1, 2, 33410} (channel-first, "ncf" layout).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf_perm[] = {3};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf[] = {1, 2, 33410};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000139439325721f, .offset= -30672}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                  "_sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf", // Node Name
                  "qti.aisw", // Package Name
                  "Transpose", // Qnn Node Type
                  params__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf, // Node Params
                  1, // Num Node Params
                  inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf, // Input Tensor Names
                  1, // Num Input Tensor Names
                  outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf, // Output Tensors
                  1// Num Output Tensors
), err);
return err;
}

// Reshape node: reinterprets {1, 2, 33410} as {1, 2, 130, 257} (130 * 257 = 33410); no params.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_10(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Reshape_10 */
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_10[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf" };
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_10_output_0[] = {1, 2, 130, 257};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_10[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_10_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000139439325721f, .offset= -30672}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_10_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                  "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_10", // Node Name
                  "qti.aisw", // Package Name
                  "Reshape", // Qnn Node Type
                  nullptr, // Node Params
                  0, // Num Node Params
                  inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_10, // Input Tensor Names
                  1, // Num Input Tensor Names
                  outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_10, // Output Tensors
                  1// Num Output Tensors
), err);
return err;
}

// StridedSlice node: takes rows 0..129 of axis 2 and columns 128..257 of axis 3 (stride 1 everywhere)
// from {1, 2, 130, 257}, producing _v_556 of shape {1, 2, 129, 129}; ranges are {begin, end, stride} per axis.
static ModelError_t addNode_Slice_1(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR Slice_1 */
uint32_t dimensions_Slice_1_ranges[] = {4, 3};
int32_t Slice_1_ranges[] = {0, 1, 1, 0, 2, 1, 0, 129, 1,
128, 257, 1}; Qnn_Param_t params_Slice_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "Slice_1_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions_Slice_1_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)Slice_1_ranges, .dataSize=48}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs_Slice_1[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_10_output_0" }; uint32_t dimensions__v_556[] = {1, 2, 129, 129}; Qnn_Tensor_t outputs_Slice_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_v_556", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000139439325721f, .offset= -30672}}}, .rank= 4, .dimensions=dimensions__v_556, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { 
QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "Slice_1", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params_Slice_1, // Node Params 5, // Num Node Params inputs_Slice_1, // Input Tensor Names 1, // Num Input Tensor Names outputs_Slice_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Add_2(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Add_2 */ Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Add_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Add_2[] = { "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_output_0", "_v_556" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Add_2_output_0[] = {1, 2, 129, 129}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Add_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Add_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000444817960670f, .offset= -35015}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Add_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; 
  // (continuation of addNode__sentence_encoder_attn_encoder_attn_layers_1_Add_2)
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_sentence_encoder_attn_encoder_attn_layers_1_Add_2", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseBinary", // Qnn Node Type
    params__sentence_encoder_attn_encoder_attn_layers_1_Add_2, // Node Params
    1, // Num Node Params
    inputs__sentence_encoder_attn_encoder_attn_layers_1_Add_2, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__sentence_encoder_attn_encoder_attn_layers_1_Add_2, // Output Tensors
    1 // Num Output Tensors
  ), err);
  return err;
}

// ElementWiseSelect (from ONNX Where): select(condition, x, y) over the
// [1,2,129,129] attention logits. Inputs: a cast mask ("_..._Cast_5_output_0"),
// a constant ("_..._layers_0_Constant_88_output_0" -- note: reused from
// layers_0, presumably the masked-fill value shared across layers), and the
// Add_2 logits. The large output scale (~0.153, offset -65526) suggests the
// constant is a big negative fill value -- TODO confirm against the exporter.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Where(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Where */
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Where[] = {
    "_sentence_encoder_attn_encoder_attn_layers_1_Cast_5_output_0",
    "_sentence_encoder_attn_encoder_attn_layers_0_Constant_88_output_0",
    "_sentence_encoder_attn_encoder_attn_layers_1_Add_2_output_0"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Where_output_0[] = {1, 2, 129, 129};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Where[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_attn_encoder_attn_layers_1_Where_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.1526109278202057f, .offset= -65526}}},
        .rank= 4,
        .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Where_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_sentence_encoder_attn_encoder_attn_layers_1_Where", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseSelect", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__sentence_encoder_attn_encoder_attn_layers_1_Where, // Input Tensor Names
    3, // Num Input Tensor Names
    outputs__sentence_encoder_attn_encoder_attn_layers_1_Where, // Output Tensors
    1 // Num Output Tensors
  ), err);
  return err;
}

// Softmax over the last axis (axis=3) of the masked logits, temperature
// beta=1.0. Output encoding scale 1/65536 with offset 0 -- the natural
// asymmetric encoding for values in [0, 1].
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Softmax(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Softmax */
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Softmax[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="beta",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 1.000000000000f}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Softmax[] = {
    "_sentence_encoder_attn_encoder_attn_layers_1_Where_output_0"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0[] = {1, 2, 129, 129};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Softmax[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}},
        .rank= 4,
        .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_sentence_encoder_attn_encoder_attn_layers_1_Softmax", // Node Name
    "qti.aisw", // Package Name
    "Softmax", // Qnn Node Type
    params__sentence_encoder_attn_encoder_attn_layers_1_Softmax, // Node Params
    2, // Num Node Params
    inputs__sentence_encoder_attn_encoder_attn_layers_1_Softmax, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__sentence_encoder_attn_encoder_attn_layers_1_Softmax, // Output Tensors
    1 // Num Output Tensors
  ), err);
  return err;
}

// Transpose: NCHW -> NHWC layout change of the softmax output
// (perm {0,2,3,1}), feeding the axis-aligned Pad_3 below.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc */
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc_perm[] = {4};
  uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
           QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc_perm, .dataSize=16}},  // 4 x sizeof(uint32_t)
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  // (continuation of addNode__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc)
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc[] = {
    "_sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc[] = {1, 129, 129, 2};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}},  // layout-only op: encoding unchanged
        .rank= 4,
        .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc, // Node Params
    1, // Num Node Params
    inputs__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc, // Output Tensors
    1 // Num Output Tensors
  ), err);
  return err;
}

// MatMul: attention probabilities [1,2,129,129] x value tensor (8-bit
// "converted" copy of Transpose_1 output) -> context [1,2,129,32].
// Note it consumes the pre-transpose "_..._Softmax_output_0", not the
// _nhwc variant; the _nhwc branch feeds the relative-position path below.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_MatMul_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_MatMul_2 */
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_MatMul_2[] = {
    // Neither operand is transposed by the op itself.
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_2[] = {
    "_sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0",
    "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_MatMul_2_output_0[] = {1, 2, 129, 32};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_2[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_2_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0000507057848154f, .offset= -35264}}},
        .rank= 4,
        .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_MatMul_2_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_2", // Node Name
    "qti.aisw", // Package Name
    "MatMul", // Qnn Node Type
    params__sentence_encoder_attn_encoder_attn_layers_1_MatMul_2, // Node Params
    2, // Num Node Params
    inputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_2, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_2, // Output Tensors
    1 // Num Output Tensors
  ), err);
  return err;
}

// Pad (constant 0): append 128 zeros at the end of axis 2 of the NHWC
// softmax output: [1,129,129,2] -> [1,129,257,2]. First step of the
// reverse "skew" (pad/reshape/slice) on the probability tensor.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_3(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Pad_3 */
  // pad_amount is [4 axes x 2] of (before, after) pads; only axis 2 "after" is non-zero.
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_pad_amount[] = {4, 2};
  uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Pad_3_pad_amount[] = {0, 0, 0, 0, 0, 128, 0, 0};
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Pad_3[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_3_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
           QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Pad_3_pad_amount, .dataSize=32}},  // 8 x sizeof(uint32_t)
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}  // scheme 0 -- presumably constant padding; see QnnOpDef
  };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_3[] = {
    "_sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0[] = {1, 129, 257, 2};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_3[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}},
        .rank= 4,
        .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_sentence_encoder_attn_encoder_attn_layers_1_Pad_3", // Node Name
    "qti.aisw", // Package Name
    "Pad", // Qnn Node Type
    params__sentence_encoder_attn_encoder_attn_layers_1_Pad_3, // Node Params
    3, // Num Node Params
    inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_3, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_3, // Output Tensors
    1 // Num Output Tensors
  ), err);
  return err;
}

// Transpose: NHWC -> NCHW (perm {0,3,1,2}) of the padded tensor,
// restoring channel-first layout for the following Reshape_13.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw */
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw_perm[] = {4};
  uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
           QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw_perm, .dataSize=16}},  // 4 x sizeof(uint32_t)
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw[] = {
    "_sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw[] = {1, 2, 129, 257};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}},  // layout-only op: encoding unchanged
        .rank= 4,
        .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw, // Node Params
    1, // Num Node Params
    inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw, // Output Tensors
    1 // Num Output Tensors
  ), err);
  return err;
}

// Reshape: flatten the trailing two axes, [1,2,129,257] -> [1,2,33153]
// (129 * 257 = 33153), ahead of the 1-D pad/transpose sequence below.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Reshape_13 */
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13[] = {
    "_sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0[] = {1, 2, 33153};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}},
        .rank= 3,
        .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_13", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13, // Output Tensors
    1 // Num Output Tensors
  ), err);
  return err;
}

// Transpose: NCF -> NFC (perm {0,2,1}), [1,2,33153] -> [1,33153,2],
// so the following Pad_4 can prepend along the feature axis.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc */
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc_perm[] = {3};
  uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="perm",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc_perm",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
           QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 1,
         .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc_perm,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc_perm, .dataSize=12}},  // 3 x sizeof(uint32_t)
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc[] = {
    "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc[] = {1, 33153, 2};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}},
        .rank= 3,
        .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc", // Node Name
    "qti.aisw", // Package Name
    "Transpose", // Qnn Node Type
    params__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc, // Node Params
    1, // Num Node Params
    inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc, // Output Tensors
    1 // Num Output Tensors
  ), err);
  return err;
}

// Pad (constant 0): prepend 129 zeros on axis 1: [1,33153,2] -> [1,33282,2]
// (33153 + 129 = 33282 = 2 * 129 * 258 / 2, matching Reshape_16 below).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_4(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Pad_4 */
  // pad_amount is [3 axes x 2] of (before, after); only axis 1 "before" is non-zero.
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_pad_amount[] = {3, 2};
  uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Pad_4_pad_amount[] = {0, 0, 129, 0, 0, 0};
  Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Pad_4[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
       .version= QNN_TENSOR_VERSION_2,
       {.v2= {
         .id=0,
         .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_4_pad_amount",
         .type= QNN_TENSOR_TYPE_STATIC,
         .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
         .dataType= QNN_DATATYPE_UINT_32,
         .quantizeParams= { QNN_DEFINITION_UNDEFINED,
           QNN_QUANTIZATION_ENCODING_UNDEFINED,
           {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
         .rank= 2,
         .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_pad_amount,
         .memType= QNN_TENSORMEMTYPE_RAW,
         {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Pad_4_pad_amount, .dataSize=24}},  // 6 x sizeof(uint32_t)
         .isDynamicDimensions= nullptr,
         .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
           .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
         .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="pad_constant_value",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000000000000f}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="scheme",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}  // scheme 0 -- presumably constant padding; see QnnOpDef
  };
  const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_4[] = {
    "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0[] = {1, 33282, 2};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_4[] = {
    (Qnn_Tensor_t) {
      .version= QNN_TENSOR_VERSION_2,
      {.v2= {
        .id=0,
        .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0",
        .type= QNN_TENSOR_TYPE_NATIVE,
        .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
        .dataType= QNN_DATATYPE_UFIXED_POINT_16,
        .quantizeParams= { QNN_DEFINITION_DEFINED,
          QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
          {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}},
        .rank= 3,
        .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0,
        .memType= QNN_TENSORMEMTYPE_RAW,
        {.clientBuf= { .data=nullptr, .dataSize=0}},
        .isDynamicDimensions= nullptr,
        .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
          .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
        .isProduced= 0}}}
  };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_1_Pad_4", // Node Name "qti.aisw", // Package Name "Pad", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_1_Pad_4, // Node Params 3, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_4, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_4, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf */ uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf_perm[] = {3}; uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf[] = { 
"_sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf[] = {1, 2, 33282}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_16(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Reshape_16 */ const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_16[] = { "_sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf" }; uint32_t 
// --- Auto-generated QNN graph-builder code (qnn-onnx-converter, export_format=cpp). ---
// Each static helper registers one tensor or one op node on the passed QnnModel.
// NOTE(review): machine-generated file - prefer regenerating from the source model
// over hand edits; all scale/offset/dimension literals below are calibration output.
//
// Tail of addNode for ..._Reshape_16: declares its 1x2x129x258 uint16
// (scale/offset quantized) output tensor and registers the "Reshape" op.
dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_16_output_0[] = {1, 2, 129, 258};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_16[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_16_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_16,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}},
    .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Reshape_16_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_16", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_16, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_16, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers the "StridedSlice" node for ONNX Slice_8.
// The static "ranges" tensor is a 4x3 int32 table of (begin, end, stride) per axis:
// axis0 [0,1), axis1 [0,2), axis2 [0,129), axis3 [1,258) - i.e. it drops the first
// element of the last axis, taking the 1x2x129x258 input to a 1x2x129x257 output.
// All four mask/shrink scalar params are 0 (no masking, no axis changes).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Slice_8(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Slice_8 */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Slice_8_ranges[] = {4, 3};
// 12 int32 values -> .dataSize=48 bytes below.
int32_t _sentence_encoder_attn_encoder_attn_layers_1_Slice_8_ranges[] = {0, 1, 1, 0, 2, 1, 0, 129, 1, 1, 258, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Slice_8[] = {
  {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Slice_8_ranges",
    .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_INT_32,
    .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                       {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
    .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Slice_8_ranges,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Slice_8_ranges, .dataSize=48}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}}},
  {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
  {.paramType=QNN_PARAMTYPE_SCALAR, .name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
  {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}},
  {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Slice_8[] = {
  "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_16_output_0"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Slice_8_output_0[] = {1, 2, 129, 257};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Slice_8[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Slice_8_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_16,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}},
    .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Slice_8_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_attn_encoder_attn_layers_1_Slice_8", // Node Name
                       "qti.aisw", // Package Name
                       "StridedSlice", // Qnn Node Type
                       params__sentence_encoder_attn_encoder_attn_layers_1_Slice_8, // Node Params
                       5, // Num Node Params
                       inputs__sentence_encoder_attn_encoder_attn_layers_1_Slice_8, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_attn_encoder_attn_layers_1_Slice_8, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers the static (constant-data) tensor ..._Unsqueeze_41_output_0_nchw:
// 1x1x257x32 uint8 (scale 0.0067470902577043, offset -124); payload is taken from
// the binary blob via BINVARSTART/BINLEN.  (Declaration continues past this span.)
static ModelError_t addTensor__sentence_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw[] = {1, 1, 257, 32};
VALIDATE(model.addTensor("_sentence_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw",
    .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0067470902577043f, .offset= -124}}},
    .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=BINVARSTART(_sentence_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw),
// (continued) remainder of the Unsqueeze_41 static tensor registration.
.dataSize=BINLEN(_sentence_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw)}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
  ), err);
return err;
}

// Registers "MatMul" for ONNX MatMul_3: (1,2,129,257) uint16 activation times the
// (1,1,257,32) static uint8 tensor above -> (1,2,129,32).  Neither operand is
// transposed (transpose_in0 = transpose_in1 = 0).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_MatMul_3(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_MatMul_3 */
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_MatMul_3[] = {
  {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in0", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}},
  {.paramType=QNN_PARAMTYPE_SCALAR, .name="transpose_in1", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_3[] = {
  "_sentence_encoder_attn_encoder_attn_layers_1_Slice_8_output_0",
  "_sentence_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_MatMul_3_output_0[] = {1, 2, 129, 32};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_3[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_3_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_16,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0000025384001674f, .offset= -30719}}},
    .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_MatMul_3_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_3", // Node Name
                       "qti.aisw", // Package Name
                       "MatMul", // Qnn Node Type
                       params__sentence_encoder_attn_encoder_attn_layers_1_MatMul_3, // Node Params
                       2, // Num Node Params
                       inputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_3, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__sentence_encoder_attn_encoder_attn_layers_1_MatMul_3, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers "ElementWiseBinary" for ONNX Add_4, operation code 0 (addition,
// per the node name - confirm against the QNN ElementWiseBinary operation enum).
// Combines the MatMul_2 and MatMul_3 branch outputs, both (1,2,129,32).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Add_4(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Add_4 */
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Add_4[] = {
  {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Add_4[] = {
  "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_2_output_0",
  "_sentence_encoder_attn_encoder_attn_layers_1_MatMul_3_output_0"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Add_4_output_0[] = {1, 2, 129, 32};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Add_4[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Add_4_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_16,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0000524930437678f, .offset= -34982}}},
    .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Add_4_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED,
                     .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_attn_encoder_attn_layers_1_Add_4", // Node Name
                       "qti.aisw", // Package Name
                       "ElementWiseBinary", // Qnn Node Type
                       params__sentence_encoder_attn_encoder_attn_layers_1_Add_4, // Node Params
                       1, // Num Node Params
                       inputs__sentence_encoder_attn_encoder_attn_layers_1_Add_4, // Input Tensor Names
                       2, // Num Input Tensor Names
                       outputs__sentence_encoder_attn_encoder_attn_layers_1_Add_4, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers "Transpose" for Transpose_9, perm {0,1,3,2}: swaps the last two axes,
// (1,2,129,32) -> (1,2,32,129).  The perm is a static uint32 tensor
// (4 elements -> .dataSize=16).  (Function continues past this span.)
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Transpose_9(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Transpose_9 */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_9_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_Transpose_9_perm[] = {0, 1, 3, 2};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_Transpose_9[] = {
  {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_9_perm",
    .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UINT_32,
    .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                       {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
    .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_9_perm,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_Transpose_9_perm, .dataSize=16}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}}}
};
const char*
// (continued) inputs/output tensor and addNode call for Transpose_9.
inputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose_9[] = {
  "_sentence_encoder_attn_encoder_attn_layers_1_Add_4_output_0"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_9_output_0[] = {1, 2, 32, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose_9[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_9_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_16,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0000524930437678f, .offset= -34982}}},
    .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_Transpose_9_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_9", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__sentence_encoder_attn_encoder_attn_layers_1_Transpose_9, // Node Params
                       1, // Num Node Params
                       inputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose_9, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_attn_encoder_attn_layers_1_Transpose_9, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers "Reshape" for Reshape_19: folds (1,2,32,129) into (1,64,1,129) so the
// following "conv_o" 1x1 convolution can run as a 2-D conv.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_19(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_Reshape_19 */
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_19[] = {
  "_sentence_encoder_attn_encoder_attn_layers_1_Transpose_9_output_0"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_19[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d",
    .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_16,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0000524930437678f, .offset= -34982}}},
    .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d,
    .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_attn_encoder_attn_layers_1_Reshape_19", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_19, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_attn_encoder_attn_layers_1_Reshape_19, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers the NCHW -> NHWC "Transpose" (perm {0,2,3,1}) feeding the conv:
// (1,64,1,129) -> (1,1,129,64).  (Function continues past this span.)
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t
params__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc[] = {
  {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc_perm",
    .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UINT_32,
    .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                       {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
    .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc_perm,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc[] = {
  "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc",
    .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_16,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0000524930437678f, .offset= -34982}}},
    .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc,
    .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr,
// (continued) finish the NHWC-transpose output tensor, then register the node.
.dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc, // Node Params
                       1, // Num Node Params
                       inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers the static conv_o weight tensor: 1x1x64x64 uint8
// (scale 0.0032551754266024, offset -130); data comes from the binary blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_weight[] = {1, 1, 64, 64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_weight", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_weight",
    .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0032551754266024f, .offset= -130}}},
    .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_weight,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_weight),
                   .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_weight)}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
  ), err);
return err;
}

// Registers the static conv_o bias tensor: 64 uint8 values
// (scale 0.0013928471598774, offset -101); data comes from the binary blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_bias[] = {64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_bias", // Tensor Name
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_bias",
    .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_8,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0013928471598774f, .offset= -101}}},
    .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_bias,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_bias),
                   .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_bias)}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
  ), err);
return err;
}

// Registers "Conv2d" for the conv_o 1x1 convolution: dilation {1,1},
// pad_amount {0,0,0,0}, stride {1,1}, group 1 - a pure channel-mixing conv.
// (Function continues past this span.)
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_dilation[] = {2};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_pad_amount[] = {2, 2};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t
dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_stride[] = {2};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d[] = {
  {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_dilation",
    .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UINT_32,
    .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                       {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
    .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_dilation,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_dilation, .dataSize=8}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}}},
  {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_pad_amount",
    .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UINT_32,
    .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                       {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
    .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_pad_amount,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_pad_amount, .dataSize=16}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0,
// (continued) remaining Conv2d params (stride tensor, group=1,
// reuse_sparse_indices=0), inputs (activation, weight, bias) and output tensor.
.numSparseDimensions= 0}},
    .isProduced= 0}}}}},
  {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_stride",
    .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UINT_32,
    .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                       {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
    .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_stride,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d_stride, .dataSize=8}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}}},
  {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
  {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d[] = {
  "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc",
  "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_weight",
  "tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_bias"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate[] = {1, 1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate",
    .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_16,
    .quantizeParams= { QNN_DEFINITION_DEFINED,
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0000532419544470f, .offset= -31420}}},
    .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate,
    .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d", // Node Name
                       "qti.aisw", // Package Name
                       "Conv2d", // Qnn Node Type
                       params__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d, // Node Params
                       5, // Num Node Params
                       inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d, // Input Tensor Names
                       3, // Num Input Tensor Names
                       outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers the NHWC -> NCHW "Transpose" (perm {0,3,1,2}) after the conv:
// (1,1,129,64) -> (1,64,1,129).  (Function continues past this span.)
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw[] = {
  {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw_perm",
    .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UINT_32,
    .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                       {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
    .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw_perm,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw_perm, .dataSize=16}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw[] = {
  "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw",
    .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_16,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0000532419544470f, .offset= -31420}}},
    .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw,
    .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw, // Node Params
                       1, // Num Node Params
// (continued) remaining arguments of the intermediate_nchw addNode call.
inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers "Reshape" dropping the dummy spatial dim added for the 2-D conv:
// (1,64,1,129) -> (1,64,129).
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate */
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate[] = {
  "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0[] = {1, 64, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0",
    .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_16,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0000532419544470f, .offset= -31420}}},
    .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0,
    .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate", // Node Name
                       "qti.aisw", // Package Name
                       "Reshape", // Qnn Node Type
                       nullptr, // Node Params
                       0, // Num Node Params
                       inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers "Transpose" with rank-3 perm {0,2,1}: (1,64,129) -> (1,129,64)
// ("nfc" = batch, frame, channel layout).  Perm tensor is 3 uint32 -> dataSize=12.
static ModelError_t addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc */
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc_perm[] = {3};
uint32_t _sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc[] = {
  {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version=
QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc_perm",
    .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UINT_32,
    .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED,
                       {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
    .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc_perm,
    .memType= QNN_TENSORMEMTYPE_RAW,
    {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc_perm, .dataSize=12}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}}}
};
const char* inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc[] = {
  "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0"
};
uint32_t dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc[] = {1, 129, 64};
Qnn_Tensor_t
outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc[] = {
  (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= {
    .id=0, .name= "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc",
    .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
    .dataType= QNN_DATATYPE_UFIXED_POINT_16,
    .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                       {.scaleOffsetEncoding= {.scale= 0.0000532419544470f, .offset= -31420}}},
    .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc,
    .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}},
    .isDynamicDimensions= nullptr,
    .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
    .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                       "_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc", // Node Name
                       "qti.aisw", // Package Name
                       "Transpose", // Qnn Node Type
                       params__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc, // Node Params
                       1, // Num Node Params
                       inputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc, // Input Tensor Names
                       1, // Num Input Tensor Names
                       outputs__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc, // Output Tensors
                       1// Num Output Tensors
                       ), err);
return err;
}

// Registers "ElementWiseBinary" (operation code 0 - addition, per the node name;
// confirm against the QNN enum) for Add_2: adds the conv_o branch output back onto
// the norm_layers_2_0 transpose output.  (Function continues past this span.)
static ModelError_t addNode__sentence_encoder_attn_encoder_Add_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_Add_2 */
Qnn_Param_t params__sentence_encoder_attn_encoder_Add_2[] = {
  {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
};
const char* inputs__sentence_encoder_attn_encoder_Add_2[] = {
  "_sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0",
"_sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc" }; uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_1_1_Transpose_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_Add_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_norm_layers_1_1_Transpose_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000894729746506f, .offset= -29795}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_1_1_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_Add_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_attn_encoder_Add_2, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_Add_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_Add_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_weight(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_weight[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_weight", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0030096331611276f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_bias[] = {64}; VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009174664737657f, .offset= -152}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static 
// Adds the "LayerNorm" node for norm_layers_1_1: normalizes over axis 2 (the 64-wide feature
// axis of the {1, 129, 64} input), epsilon 1e-6, using the static weight/bias tensors
// registered above. The "static" keyword for this definition ends the previous line.
ModelError_t addNode__sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization */
// "axes" is passed as a rank-1 STATIC uint32 tensor holding the single value {2}.
uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization_axes[] = {1};
uint32_t _sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization_axes[] = {2};
Qnn_Param_t params__sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}} };
const char* inputs__sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization[] = { "_sentence_encoder_attn_encoder_norm_layers_1_1_Transpose_output_0", "tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_weight", "tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_bias" };
uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_1_1_Transpose_1_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_norm_layers_1_1_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000768217578297f, .offset= -28531}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_1_1_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization", // Node Name
"qti.aisw", // Package Name
"LayerNorm", // Qnn Node Type
params__sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization, // Node Params
2, // Num Node Params
inputs__sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Adds ElementWiseBinary node "_sentence_encoder_attn_encoder_ffn_layers_1_Mul"
// (operation code 13 — per the node name this is the ONNX Mul, applying the
// "_sentence_encoder_Concat_2_output_0" tensor element-wise to the LayerNorm output).
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_Mul */
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul[] = { "_sentence_encoder_attn_encoder_norm_layers_1_1_Transpose_1_output_0", "_sentence_encoder_Concat_2_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0[] = {1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000768217578297f, .offset= -28531}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_1_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_1_Mul, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Begins the Transpose node that moves the Mul output back to channel-first ("ncf") layout;
// the perm tensor and node registration continue on the next chunk of this definition.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf */
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf_perm[] = {3};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR,
// Continuation of addNode__..._ffn_layers_1_Mul_output_0_ncf: the "perm" STATIC uint32 tensor
// holds {0, 2, 1}, i.e. {1, 129, 64} -> {1, 64, 129} (features-last to channel-first).
.name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf[] = { "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf[] = {1, 64, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000768217578297f, .offset= -28531}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Parameter-less Reshape: inserts a singleton height axis, {1, 64, 129} -> {1, 64, 1, 129},
// so the 1-D convolution can be lowered to a Conv2d.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d */
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d[] = { "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d[] = {1, 64, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000768217578297f, .offset= -28531}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Transpose NCHW -> NHWC (perm {0, 2, 3, 1}) ahead of the Conv2d, which consumes
// the channel-last layout: {1, 64, 1, 129} -> {1, 1, 129, 64}.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 64};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000768217578297f, .offset= -28531}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Registers the STATIC conv_1 weight tensor ({1, 1, 64, 256} u8); continues on the next chunk.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t
// Continuation of addTensor_..._conv_1_weight: weight blob {1, 1, 64, 256}
// (HWIO: 1x1 kernel, 64 in-channels, 256 out-channels), u8 quantized, loaded from the binary.
dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_weight[] = {1, 1, 64, 256};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044525791890919f, .offset= -139}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Registers the STATIC conv_1 bias tensor: u8, rank-1 {256}, loaded from the binary blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_bias[] = {256};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_bias", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0008892201585695f, .offset= -198}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// Adds the "Conv2d" node that implements the FFN 1x1 convolution: dilation {1,1},
// pad {0,0,0,0}, stride {1,1}, group 1; inputs are the NHWC activation plus the static
// weight/bias above; output is u16 {1, 1, 129, 256}.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d */
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_dilation[] = {2};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_pad_amount[] = {2, 2};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_stride[] = {2};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d[] = { "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_weight", "tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_bias" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate[] = {1, 1, 129, 256};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000749703685869f, .offset= -30216}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d, // Node Params
5, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d, // Input Tensor Names
3, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
static ModelError_t
// Transpose NHWC -> NCHW (perm {0, 3, 1, 2}) on the Conv2d result:
// {1, 1, 129, 256} -> {1, 256, 1, 129}. ("static ModelError_t" ends the previous line.)
addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw */
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw_perm[] = {4};
uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw[] = { "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw[] = {1, 256, 1, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000749703685869f, .offset= -30216}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Parameter-less Reshape dropping the singleton height axis: {1, 256, 1, 129} -> {1, 256, 129},
// producing the original ONNX Conv output "_..._conv_1_Conv_output_0".
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate */
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate[] = { "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_output_0[] = {1, 256, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000749703685869f, .offset= -30216}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Adds ElementWiseNeuron node "_sentence_encoder_attn_encoder_ffn_layers_1_Relu"
// (operation code 4 — per the node name this is the ONNX Relu); output tensor
// definition continues on the next chunk.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_Relu(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_Relu */
Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_Relu[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 4}}}} };
const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_Relu[] = { "_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_output_0" };
uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0[] = {1, 256, 129};
Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_Relu[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0", .type=
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000404043094022f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_ffn_layers_1_Relu", // Node Name "qti.aisw", // Package Name "ElementWiseNeuron", // Qnn Node Type params__sentence_encoder_attn_encoder_ffn_layers_1_Relu, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_ffn_layers_1_Relu, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_ffn_layers_1_Relu, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc */ uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc_perm[] = {3}; uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc_perm[] = {0, 2, 1}; Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} }; const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc[] = { "_sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0" }; uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc[] = {1, 129, 256}; Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000404043094022f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc", // Node Name "qti.aisw", // Package Name "Transpose", // Qnn Node Type params__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc, // Input Tensor 
1, // Num Input Tensor Names
outputs__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Adds ElementWiseBinary node "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_1":
// combines the transposed ReLU activation with "_sentence_encoder_Concat_2_output_0".
// operation=13 — presumably ELEMENT_WISE_MULTIPLY (node name says Mul; the Add node
// below uses 0) — confirm against QnnOpDef.h. Output: u16 tensor of shape {1, 129, 256}.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_Mul_1 */
  // Single scalar param: binary-operation selector for ElementWiseBinary.
  Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1[] = {
    "_sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc",
    "_sentence_encoder_Concat_2_output_0"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0[] = {1, 129, 256};
  // Output: 16-bit unsigned fixed point, per-tensor scale/offset quantization.
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000404043094022f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_1", // Node Name
      "qti.aisw", // Package Name
      "ElementWiseBinary", // Qnn Node Type
      params__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1, // Node Params
      1, // Num Node Params
      inputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1, // Input Tensor Names
      2, // Num Input Tensor Names
      outputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1, // Output Tensors
      1// Num Output Tensors
  ), err);
  return err;
}

// Adds Transpose node "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf":
// perm {0, 2, 1} permutes {1, 129, 256} (N,F,C) -> {1, 256, 129} (N,C,F) ahead of the
// following Conv reshape. The "perm" param is a static rank-1 u32 tensor (3 * 4 = 12 bytes).
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf */
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf_perm[] = {3};
  uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf_perm[] = {0, 2, 1};
  Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf[] = {
    "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf[] = {1, 256, 129};
  // Output keeps the producer's quantization (same scale/offset as Mul_1's output).
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000404043094022f, .offset= 0}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf", // Node Name
      "qti.aisw", // Package Name
      "Transpose", // Qnn Node Type
      params__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf, // Node Params
      1, // Num Node Params
      inputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf, // Input Tensor Names
      1, // Num Input Tensor Names
      outputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf, // Output Tensors
      1// Num Output Tensors
  ), err);
  return err;
}

// Adds Reshape node "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d":
// lifts the rank-3 {1, 256, 129} NCF tensor to rank-4 {1, 256, 1, 129} so the 1-D
// convolution can run as Conv2d (height 1). (Definition continues on the next lines.)
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d */
  const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d[] = {
    "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d[] = {1, 256, 1, 129};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name=
"_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000404043094022f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
// Reshape carries no params (output shape comes from the declared output tensor).
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d", // Node Name
    "qti.aisw", // Package Name
    "Reshape", // Qnn Node Type
    nullptr, // Node Params
    0, // Num Node Params
    inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d, // Input Tensor Names
    1, // Num Input Tensor Names
    outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Adds Transpose node "..._conv_2_Conv_reshape_to_2d_nhwc":
// perm {0, 2, 3, 1} converts NCHW {1, 256, 1, 129} -> NHWC {1, 1, 129, 256}, the layout
// Conv2d consumes. perm is a static rank-1 u32 tensor (4 * 4 = 16 bytes).
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc */
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc_perm[] = {4};
  uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
  Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc[] = {
    "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc[] = {1, 1, 129, 256};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000404043094022f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc", // Node Name
      "qti.aisw", // Package Name
      "Transpose", // Qnn Node Type
      params__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc, // Node Params
      1, // Num Node Params
      inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc, // Input Tensor Names
      1, // Num Input Tensor Names
      outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc, // Output Tensors
      1// Num Output Tensors
  ), err);
  return err;
}

// Registers the static conv weight tensor (HWIO {1, 1, 256, 64}: 1x1 kernel, 256 in,
// 64 out channels), u8 asymmetric quant (scale 0.0029728165827692, offset -119).
// Payload bytes live in the companion .bin, located via BINVARSTART/BINLEN.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_weight[] = {1, 1, 256, 64};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_weight", // Tensor Name
      (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029728165827692f, .offset= -119}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

// Registers the static conv bias tensor ({64}); definition continues below.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t
dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_bias[] = {64};
// u8 asymmetric quant bias (scale 0.0012597425375134, offset -145); bytes from the .bin.
VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_bias", // Tensor Name
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012597425375134f, .offset= -145}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
), err);
return err;
}

// Adds Conv2d node "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d":
// 1x1 conv over NHWC {1, 1, 129, 256} with the weight/bias registered above, producing
// {1, 1, 129, 64}. dilation={1,1}, pad_amount={{0,0},{0,0}}, stride={1,1}, group=1,
// reuse_sparse_indices=false. Tensor params are static u32 (dataSize = elem count * 4).
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d */
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_dilation[] = {2};
  uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_dilation[] = {1, 1};
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_pad_amount[] = {2, 2};
  uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_pad_amount[] = {0, 0, 0, 0};
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_stride[] = {2};
  uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_stride[] = {1, 1};
  Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d[] = {
    "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc",
    "tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_weight",
    "tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_bias"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate[] = {1, 1, 129, 64};
  // Conv output: u16 quant (scale 0.0001166206784546, offset -29441).
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001166206784546f, .offset= -29441}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d", // Node Name
      "qti.aisw", // Package Name
      "Conv2d", // Qnn Node Type
      params__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d, // Node Params
      5, // Num Node Params
      inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d, // Input Tensor Names
      3, // Num Input Tensor Names
      outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d, // Output Tensors
      1// Num Output Tensors
  ), err);
  return err;
}

// Adds Transpose node "..._conv_2_Conv_intermediate_nchw":
// perm {0, 3, 1, 2} converts the conv's NHWC {1, 1, 129, 64} back to NCHW {1, 64, 1, 129}.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw */
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw_perm[] = {4};
  uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
  Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw[] = {
    "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw[] = {1, 64, 1, 129};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001166206784546f, .offset= -29441}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw", // Node Name
      "qti.aisw", // Package Name
      "Transpose", // Qnn Node Type
      params__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw, // Node Params
      1, // Num Node Params
      inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw, // Input Tensor Names
      1, // Num Input Tensor Names
      outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw, // Output Tensors
      1// Num Output Tensors
  ), err);
  return err;
}

// (Next definition continues on the following lines.)
static ModelError_t
// Adds Reshape node "..._conv_2_Conv_intermediate": drops the dummy height axis,
// squeezing NCHW {1, 64, 1, 129} back to the rank-3 {1, 64, 129} conv output.
addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate */
  const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate[] = {
    "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0[] = {1, 64, 129};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001166206784546f, .offset= -29441}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate", // Node Name
      "qti.aisw", // Package Name
      "Reshape", // Qnn Node Type
      nullptr, // Node Params
      0, // Num Node Params
      inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate, // Input Tensor Names
      1, // Num Input Tensor Names
      outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate, // Output Tensors
      1// Num Output Tensors
  ), err);
  return err;
}

// Adds Transpose node "..._conv_2_Conv_output_0_nfc":
// perm {0, 2, 1} permutes {1, 64, 129} (N,C,F) -> {1, 129, 64} (N,F,C).
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc */
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc_perm[] = {3};
  uint32_t _sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc_perm[] = {0, 2, 1};
  Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc[] = {
    "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc[] = {1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001166206784546f, .offset= -29441}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc", // Node Name
      "qti.aisw", // Package Name
      "Transpose", // Qnn Node Type
      params__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc, // Node Params
      1, // Num Node Params
      inputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc, // Input Tensor Names
      1, // Num Input Tensor Names
      outputs__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc, // Output Tensors
      1// Num Output Tensors
  ), err);
  return err;
}

// Adds ElementWiseBinary node "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_2":
// operation=13 (presumably multiply — see note on Mul_1 above) of the conv output with
// "_sentence_encoder_Concat_2_output_0". Output shape {1, 129, 64}.
static ModelError_t addNode__sentence_encoder_attn_encoder_ffn_layers_1_Mul_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_ffn_layers_1_Mul_2 */
  Qnn_Param_t params__sentence_encoder_attn_encoder_ffn_layers_1_Mul_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_2[] = {
    "_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc",
    "_sentence_encoder_Concat_2_output_0"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_2_output_0[] = {1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_2[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001166206784546f, .offset= -29441}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_ffn_layers_1_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_2", // Node Name
      "qti.aisw", // Package Name
      "ElementWiseBinary", // Qnn Node Type
      params__sentence_encoder_attn_encoder_ffn_layers_1_Mul_2, // Node Params
      1, // Num Node Params
      inputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_2, // Input Tensor Names
      2, // Num Input Tensor Names
      outputs__sentence_encoder_attn_encoder_ffn_layers_1_Mul_2, // Output Tensors
      1// Num Output Tensors
  ), err);
  return err;
}

// Adds ElementWiseBinary node "_sentence_encoder_attn_encoder_Add_3": operation=0
// (add) — the FFN residual connection. (Definition continues on the next lines.)
static ModelError_t addNode__sentence_encoder_attn_encoder_Add_3(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_Add_3 */
  Qnn_Param_t params__sentence_encoder_attn_encoder_Add_3[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_Add_3[] = {
    "_sentence_encoder_attn_encoder_norm_layers_1_1_Transpose_1_output_0",
    "_sentence_encoder_attn_encoder_ffn_layers_1_Mul_2_output_0"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_2_1_Transpose_output_0[] = {1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_Add_3[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_norm_layers_2_1_Transpose_output_0", .type=
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001208964895341f, .offset= -33416}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_2_1_Transpose_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
    "_sentence_encoder_attn_encoder_Add_3", // Node Name
    "qti.aisw", // Package Name
    "ElementWiseBinary", // Qnn Node Type
    params__sentence_encoder_attn_encoder_Add_3, // Node Params
    1, // Num Node Params
    inputs__sentence_encoder_attn_encoder_Add_3, // Input Tensor Names
    2, // Num Input Tensor Names
    outputs__sentence_encoder_attn_encoder_Add_3, // Output Tensors
    1// Num Output Tensors
), err);
return err;
}

// Registers the static LayerNorm gamma (scale) tensor, shape {64}, u8 quant
// (scale 0.0028663885314018, offset 0); bytes from the companion .bin.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_weight(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_weight[] = {64};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_weight", // Tensor Name
      (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0028663885314018f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

// Registers the static LayerNorm beta (shift) tensor, shape {64}, u8 quant
// (scale 0.0017139845294878, offset -182); bytes from the companion .bin.
static ModelError_t addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_bias(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  uint32_t dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_bias[] = {64};
  VALIDATE(model.addTensor("tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_bias", // Tensor Name
      (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017139845294878f, .offset= -182}}}, .rank= 1, .dimensions=dimensions_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_bias), .dataSize=BINLEN(tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  ), err);
  return err;
}

// Adds LayerNorm node "..._norm_layers_2_1_norm_LayerNormalization": normalizes over
// axis 2 (the 64-wide feature axis of {1, 129, 64}) with epsilon 1e-6, using the
// gamma/beta tensors registered above.
static ModelError_t addNode__sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization */
  uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization_axes[] = {1};
  uint32_t _sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization_axes[] = {2};
  Qnn_Param_t params__sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR, .name="axes", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization_axes", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization_axes, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization_axes, .dataSize=4}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="epsilon", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_FLOAT_32, {.floatValue = 0.000001000000f}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization[] = {
    "_sentence_encoder_attn_encoder_norm_layers_2_1_Transpose_output_0",
    "tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_weight",
    "tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_bias"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0[] = {1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000861795851961f, .offset= -29342}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}
  };
  VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
      "_sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization", // Node Name
      "qti.aisw", // Package Name
      "LayerNorm", // Qnn Node Type
      params__sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization, // Node Params
      2, // Num Node Params
      inputs__sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization, // Input Tensor Names
      3, // Num Input Tensor Names
      outputs__sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization, // Output Tensors
      1// Num Output Tensors
  ), err);
  return err;
}

// Adds ElementWiseBinary node "_sentence_encoder_attn_encoder_Mul_2" (operation=13,
// presumably multiply) of the LayerNorm output with "_sentence_encoder_Concat_2_output_0".
// (Definition continues past this chunk.)
static ModelError_t addNode__sentence_encoder_attn_encoder_Mul_2(QnnModel& model){
  ModelError_t err = MODEL_NO_ERROR;
  /* ADDING NODE FOR _sentence_encoder_attn_encoder_Mul_2 */
  Qnn_Param_t params__sentence_encoder_attn_encoder_Mul_2[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}}
  };
  const char* inputs__sentence_encoder_attn_encoder_Mul_2[] = {
    "_sentence_encoder_attn_encoder_norm_layers_2_1_Transpose_1_output_0",
    "_sentence_encoder_Concat_2_output_0"
  };
  uint32_t dimensions__sentence_encoder_attn_encoder_Mul_2_output_0[] = {1, 129, 64};
  Qnn_Tensor_t outputs__sentence_encoder_attn_encoder_Mul_2[] = {
    (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_attn_encoder_Mul_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000861795851961f, .offset= -29342}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_attn_encoder_Mul_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_attn_encoder_Mul_2", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_attn_encoder_Mul_2, // Node Params 1, // Num Node Params inputs__sentence_encoder_attn_encoder_Mul_2, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_attn_encoder_Mul_2, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_Add(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_Add */ Qnn_Param_t params__sentence_encoder_Add[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__sentence_encoder_Add[] = { "_sentence_encoder_attn_encoder_Mul_2_output_0", "_sentence_encoder_convnext_convnext_5_Mul_3_output_0" }; uint32_t dimensions__sentence_encoder_Add_output_0[] = {1, 129, 64}; Qnn_Tensor_t outputs__sentence_encoder_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001056542969309f, .offset= -37918}}}, .rank= 3, 
.dimensions=dimensions__sentence_encoder_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_Add", // Node Name "qti.aisw", // Package Name "ElementWiseBinary", // Qnn Node Type params__sentence_encoder_Add, // Node Params 1, // Num Node Params inputs__sentence_encoder_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__sentence_encoder_Add, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_Slice_1(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_Slice_1 */ uint32_t dimensions__sentence_encoder_Slice_1_ranges[] = {3, 3}; int32_t _sentence_encoder_Slice_1_ranges[] = {0, 1, 1, 0, 1, 1, 0, 64, 1}; Qnn_Param_t params__sentence_encoder_Slice_1[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="ranges", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_Slice_1_ranges", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_INT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_Slice_1_ranges, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_Slice_1_ranges, .dataSize=36}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="begin_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, 
.name="end_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="new_axes_mask", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="shrink_axes", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 0}}}} }; const char* inputs__sentence_encoder_Slice_1[] = { "_sentence_encoder_Add_output_0" }; uint32_t dimensions__sentence_encoder_Slice_1_output_0[] = {1, 1, 64}; Qnn_Tensor_t outputs__sentence_encoder_Slice_1[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_Slice_1_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001056542969309f, .offset= -37918}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_Slice_1_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_sentence_encoder_Slice_1", // Node Name "qti.aisw", // Package Name "StridedSlice", // Qnn Node Type params__sentence_encoder_Slice_1, // Node Params 5, // Num Node Params inputs__sentence_encoder_Slice_1, // Input Tensor Names 1, // Num Input Tensor Names outputs__sentence_encoder_Slice_1, // Output Tensors 1// Num Output Tensors ), err); return err; } static ModelError_t addNode__sentence_encoder_Slice_1_output_0_ncf(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; /* ADDING NODE FOR _sentence_encoder_Slice_1_output_0_ncf */ uint32_t dimensions__sentence_encoder_Slice_1_output_0_ncf_perm[] = {3}; uint32_t _sentence_encoder_Slice_1_output_0_ncf_perm[] 
= {0, 2, 1};
Qnn_Param_t params__sentence_encoder_Slice_1_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_Slice_1_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_Slice_1_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_Slice_1_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_Slice_1_output_0_ncf[] = { "_sentence_encoder_Slice_1_output_0" };
uint32_t dimensions__sentence_encoder_Slice_1_output_0_ncf[] = {1, 64, 1};
Qnn_Tensor_t outputs__sentence_encoder_Slice_1_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_Slice_1_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001056542969309f, .offset= -37918}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_Slice_1_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_Slice_1_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn
Node Type
params__sentence_encoder_Slice_1_output_0_ncf, // Node Params
1, // Num Node Params
inputs__sentence_encoder_Slice_1_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_Slice_1_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Reshape (1,64,1) -> (1,64,1,1): lifts the rank-3 tensor to rank 4 so the 1x1
// Conv2d below can consume it. Quantization is carried through unchanged.
static ModelError_t addNode__sentence_encoder_proj_out_net_Conv_reshape_to_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_proj_out_net_Conv_reshape_to_2d */
const char* inputs__sentence_encoder_proj_out_net_Conv_reshape_to_2d[] = { "_sentence_encoder_Slice_1_output_0_ncf" };
uint32_t dimensions__sentence_encoder_proj_out_net_Conv_reshape_to_2d[] = {1, 64, 1, 1};
Qnn_Tensor_t outputs__sentence_encoder_proj_out_net_Conv_reshape_to_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_net_Conv_reshape_to_2d", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001056542969309f, .offset= -37918}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_proj_out_net_Conv_reshape_to_2d, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_proj_out_net_Conv_reshape_to_2d", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_proj_out_net_Conv_reshape_to_2d, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_proj_out_net_Conv_reshape_to_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Transpose NCHW (1,64,1,1) -> NHWC (1,1,1,64) via perm {0,2,3,1}, matching the
// channel-last layout the Conv2d node expects.
static ModelError_t addNode__sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc */
uint32_t dimensions__sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc_perm[] = {4};
uint32_t _sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc_perm[] = {0, 2, 3, 1};
Qnn_Param_t params__sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc[] = { "_sentence_encoder_proj_out_net_Conv_reshape_to_2d" };
uint32_t dimensions__sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc[] = {1, 1, 1, 64};
Qnn_Tensor_t outputs__sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001056542969309f, .offset= -37918}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Registers the proj_out 1x1 convolution weight: (1,1,64,64), 8-bit quantized
// (scale 0.0042192637920380, offset -115), loaded from the binary blob.
static ModelError_t addTensor_tts_dp_sentence_encoder_proj_out_net_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_sentence_encoder_proj_out_net_weight[] = {1, 1, 64, 64};
VALIDATE(model.addTensor("tts_dp_sentence_encoder_proj_out_net_weight", // Tensor Name
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_sentence_encoder_proj_out_net_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0042192637920380f, .offset= -115}}}, .rank= 4, .dimensions=dimensions_tts_dp_sentence_encoder_proj_out_net_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_sentence_encoder_proj_out_net_weight), .dataSize=BINLEN(tts_dp_sentence_encoder_proj_out_net_weight)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err);
return err;
}
// 1x1 Conv2d (stride 1x1, dilation 1x1, zero padding, group 1) over the NHWC
// (1,1,1,64) activation with the 64->64 weight above; no bias input. Output is
// re-quantized to scale 0.0000394291455450 / offset -34669.
static ModelError_t addNode__sentence_encoder_proj_out_net_Conv_2d(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_proj_out_net_Conv_2d */
uint32_t dimensions__sentence_encoder_proj_out_net_Conv_2d_dilation[] = {2};
uint32_t _sentence_encoder_proj_out_net_Conv_2d_dilation[] = {1, 1};
uint32_t dimensions__sentence_encoder_proj_out_net_Conv_2d_pad_amount[] = {2, 2};
uint32_t _sentence_encoder_proj_out_net_Conv_2d_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions__sentence_encoder_proj_out_net_Conv_2d_stride[] = {2};
uint32_t _sentence_encoder_proj_out_net_Conv_2d_stride[] = {1, 1};
Qnn_Param_t params__sentence_encoder_proj_out_net_Conv_2d[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_net_Conv_2d_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_proj_out_net_Conv_2d_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_proj_out_net_Conv_2d_dilation, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_net_Conv_2d_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions__sentence_encoder_proj_out_net_Conv_2d_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_proj_out_net_Conv_2d_pad_amount, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_net_Conv_2d_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_proj_out_net_Conv_2d_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_proj_out_net_Conv_2d_stride, .dataSize=8}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="reuse_sparse_indices", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} };
const char* inputs__sentence_encoder_proj_out_net_Conv_2d[] = { "_sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc", "tts_dp_sentence_encoder_proj_out_net_weight" };
uint32_t dimensions__sentence_encoder_proj_out_net_Conv_intermediate[] = {1, 1, 1, 64};
Qnn_Tensor_t outputs__sentence_encoder_proj_out_net_Conv_2d[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_net_Conv_intermediate", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000394291455450f, .offset= -34669}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_proj_out_net_Conv_intermediate, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_proj_out_net_Conv_2d", // Node Name
"qti.aisw", // Package Name
"Conv2d", // Qnn Node Type
params__sentence_encoder_proj_out_net_Conv_2d, // Node Params
5, // Num Node Params
inputs__sentence_encoder_proj_out_net_Conv_2d, // Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_proj_out_net_Conv_2d, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Transpose NHWC (1,1,1,64) -> NCHW (1,64,1,1) via perm {0,3,1,2}, undoing the
// pre-conv layout change (continues on the next lines).
static ModelError_t addNode__sentence_encoder_proj_out_net_Conv_intermediate_nchw(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_proj_out_net_Conv_intermediate_nchw */
uint32_t dimensions__sentence_encoder_proj_out_net_Conv_intermediate_nchw_perm[] = {4};
uint32_t _sentence_encoder_proj_out_net_Conv_intermediate_nchw_perm[] = {0, 3, 1, 2};
Qnn_Param_t params__sentence_encoder_proj_out_net_Conv_intermediate_nchw[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_net_Conv_intermediate_nchw_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_proj_out_net_Conv_intermediate_nchw_perm,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_proj_out_net_Conv_intermediate_nchw_perm, .dataSize=16}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_proj_out_net_Conv_intermediate_nchw[] = { "_sentence_encoder_proj_out_net_Conv_intermediate" };
uint32_t dimensions__sentence_encoder_proj_out_net_Conv_intermediate_nchw[] = {1, 64, 1, 1};
Qnn_Tensor_t outputs__sentence_encoder_proj_out_net_Conv_intermediate_nchw[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_net_Conv_intermediate_nchw", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000394291455450f, .offset= -34669}}}, .rank= 4, .dimensions=dimensions__sentence_encoder_proj_out_net_Conv_intermediate_nchw, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_proj_out_net_Conv_intermediate_nchw", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_proj_out_net_Conv_intermediate_nchw, // Node Params
1, // Num Node Params
inputs__sentence_encoder_proj_out_net_Conv_intermediate_nchw, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_proj_out_net_Conv_intermediate_nchw, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Reshape (1,64,1,1) -> (1,64,1): drops the extra spatial dim added for the conv,
// restoring the rank-3 channel-first activation.
static ModelError_t addNode__sentence_encoder_proj_out_net_Conv_intermediate(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_proj_out_net_Conv_intermediate */
const char* inputs__sentence_encoder_proj_out_net_Conv_intermediate[] = { "_sentence_encoder_proj_out_net_Conv_intermediate_nchw" };
uint32_t dimensions__sentence_encoder_proj_out_net_Conv_output_0[] = {1, 64, 1};
Qnn_Tensor_t outputs__sentence_encoder_proj_out_net_Conv_intermediate[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_net_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000394291455450f, .offset= -34669}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_proj_out_net_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_proj_out_net_Conv_intermediate", // Node Name
"qti.aisw", // Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__sentence_encoder_proj_out_net_Conv_intermediate, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_proj_out_net_Conv_intermediate, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Transpose (1,64,1) -> (1,1,64) via perm {0,2,1} (channel-last layout for the
// elementwise op below).
static ModelError_t addNode__sentence_encoder_proj_out_net_Conv_output_0_nfc(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_proj_out_net_Conv_output_0_nfc */
uint32_t dimensions__sentence_encoder_proj_out_net_Conv_output_0_nfc_perm[] = {3};
uint32_t _sentence_encoder_proj_out_net_Conv_output_0_nfc_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_proj_out_net_Conv_output_0_nfc[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_net_Conv_output_0_nfc_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_proj_out_net_Conv_output_0_nfc_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_proj_out_net_Conv_output_0_nfc_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_proj_out_net_Conv_output_0_nfc[] = { "_sentence_encoder_proj_out_net_Conv_output_0" };
uint32_t dimensions__sentence_encoder_proj_out_net_Conv_output_0_nfc[] = {1, 1, 64};
Qnn_Tensor_t outputs__sentence_encoder_proj_out_net_Conv_output_0_nfc[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_net_Conv_output_0_nfc", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000394291455450f, .offset= -34669}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_proj_out_net_Conv_output_0_nfc, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_proj_out_net_Conv_output_0_nfc", // Node Name
"qti.aisw", // Package
Name
"Transpose", // Qnn Node Type
params__sentence_encoder_proj_out_net_Conv_output_0_nfc, // Node Params
1, // Num Node Params
inputs__sentence_encoder_proj_out_net_Conv_output_0_nfc, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_proj_out_net_Conv_output_0_nfc, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// ElementWiseBinary with operation code 13 (node name suggests multiply) of the
// conv output with "_sentence_encoder_Slice_2_output_0"; output (1,1,64) keeps the
// first input's quantization.
static ModelError_t addNode__sentence_encoder_proj_out_Mul(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_proj_out_Mul */
Qnn_Param_t params__sentence_encoder_proj_out_Mul[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 13}}}} };
const char* inputs__sentence_encoder_proj_out_Mul[] = { "_sentence_encoder_proj_out_net_Conv_output_0_nfc", "_sentence_encoder_Slice_2_output_0" };
uint32_t dimensions__sentence_encoder_proj_out_Mul_output_0[] = {1, 1, 64};
Qnn_Tensor_t outputs__sentence_encoder_proj_out_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000394291455450f, .offset= -34669}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_proj_out_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_proj_out_Mul", // Node Name
"qti.aisw", // Package Name
"ElementWiseBinary", // Qnn Node Type
params__sentence_encoder_proj_out_Mul, // Node Params
1, // Num Node Params
inputs__sentence_encoder_proj_out_Mul, //
Input Tensor Names
2, // Num Input Tensor Names
outputs__sentence_encoder_proj_out_Mul, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Transpose (1,1,64) -> (1,64,1) via perm {0,2,1} (back to channel-first).
static ModelError_t addNode__sentence_encoder_proj_out_Mul_output_0_ncf(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _sentence_encoder_proj_out_Mul_output_0_ncf */
uint32_t dimensions__sentence_encoder_proj_out_Mul_output_0_ncf_perm[] = {3};
uint32_t _sentence_encoder_proj_out_Mul_output_0_ncf_perm[] = {0, 2, 1};
Qnn_Param_t params__sentence_encoder_proj_out_Mul_output_0_ncf[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="perm", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_Mul_output_0_ncf_perm", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions__sentence_encoder_proj_out_Mul_output_0_ncf_perm, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)_sentence_encoder_proj_out_Mul_output_0_ncf_perm, .dataSize=12}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}}}} };
const char* inputs__sentence_encoder_proj_out_Mul_output_0_ncf[] = { "_sentence_encoder_proj_out_Mul_output_0" };
uint32_t dimensions__sentence_encoder_proj_out_Mul_output_0_ncf[] = {1, 64, 1};
Qnn_Tensor_t outputs__sentence_encoder_proj_out_Mul_output_0_ncf[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_sentence_encoder_proj_out_Mul_output_0_ncf", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000394291455450f, .offset= -34669}}}, .rank= 3, .dimensions=dimensions__sentence_encoder_proj_out_Mul_output_0_ncf, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_sentence_encoder_proj_out_Mul_output_0_ncf", // Node Name
"qti.aisw", // Package Name
"Transpose", // Qnn Node Type
params__sentence_encoder_proj_out_Mul_output_0_ncf, // Node Params
1, // Num Node Params
inputs__sentence_encoder_proj_out_Mul_output_0_ncf, // Input Tensor Names
1, // Num Input Tensor Names
outputs__sentence_encoder_proj_out_Mul_output_0_ncf, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Reshape (1,64,1) -> (1,64): squeezes the trailing dim to feed the predictor head.
static ModelError_t addNode__predictor_Reshape(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _predictor_Reshape */
const char* inputs__predictor_Reshape[] = { "_sentence_encoder_proj_out_Mul_output_0_ncf" };
uint32_t dimensions__predictor_Reshape_output_0[] = {1, 64};
Qnn_Tensor_t outputs__predictor_Reshape[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_predictor_Reshape_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000394291455450f, .offset= -34669}}}, .rank= 2, .dimensions=dimensions__predictor_Reshape_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_predictor_Reshape", // Node Name
"qti.aisw", //
Package Name
"Reshape", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__predictor_Reshape, // Input Tensor Names
1, // Num Input Tensor Names
outputs__predictor_Reshape, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Concat along axis 1 of the (1,64) sentence feature with "_predictor_Reshape_1_output_0"
// into a (1,192) tensor — note both inputs must share this output's quantization
// (scale 0.0000394291455450, offset -34669) for a lossless concat.
static ModelError_t addNode__predictor_Concat_2(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _predictor_Concat_2 */
Qnn_Param_t params__predictor_Concat_2[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} };
const char* inputs__predictor_Concat_2[] = { "_predictor_Reshape_output_0", "_predictor_Reshape_1_output_0" };
uint32_t dimensions__predictor_Concat_2_output_0[] = {1, 192};
Qnn_Tensor_t outputs__predictor_Concat_2[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "_predictor_Concat_2_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000394291455450f, .offset= -34669}}}, .rank= 2, .dimensions=dimensions__predictor_Concat_2_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} };
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_predictor_Concat_2", // Node Name
"qti.aisw", // Package Name
"Concat", // Qnn Node Type
params__predictor_Concat_2, // Node Params
1, // Num Node Params
inputs__predictor_Concat_2, // Input Tensor Names
2, // Num Input Tensor Names
outputs__predictor_Concat_2, // Output Tensors
1// Num Output Tensors
), err);
return err;
}
// Head of addTensor_tts_dp_predictor_layers_0_weight_permute; the remainder of
// this function continues past this chunk.
static ModelError_t addTensor_tts_dp_predictor_layers_0_weight_permute(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_predictor_layers_0_weight_permute[] = {128, 192}; VALIDATE(model.addTensor("tts_dp_predictor_layers_0_weight_permute", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_predictor_layers_0_weight_permute", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0048527722246945f, .offset= -138}}}, .rank= 2, .dimensions=dimensions_tts_dp_predictor_layers_0_weight_permute, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_predictor_layers_0_weight_permute), .dataSize=BINLEN(tts_dp_predictor_layers_0_weight_permute)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } static ModelError_t addTensor_tts_dp_predictor_layers_0_bias(QnnModel& model){ ModelError_t err = MODEL_NO_ERROR; uint32_t dimensions_tts_dp_predictor_layers_0_bias[] = {128}; VALIDATE(model.addTensor("tts_dp_predictor_layers_0_bias", // Tensor Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_2, {.v2= { .id=0, .name= "tts_dp_predictor_layers_0_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009914825204760f, .offset= -124}}}, .rank= 1, .dimensions=dimensions_tts_dp_predictor_layers_0_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(tts_dp_predictor_layers_0_bias), .dataSize=BINLEN(tts_dp_predictor_layers_0_bias)}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} ), err); return err; } 
// Lowers ONNX Gemm _predictor_layers_0_Gemm to QNN FullyConnected:
// (1x192 activation) x (128x192 permuted weight) + (128 bias) -> 1x128 output.
static ModelError_t addNode__predictor_layers_0_Gemm(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _predictor_layers_0_Gemm */
const char* inputs__predictor_layers_0_Gemm[] = {
"_predictor_Concat_2_output_0",
"tts_dp_predictor_layers_0_weight_permute",
"tts_dp_predictor_layers_0_bias"
};
uint32_t dimensions__predictor_layers_0_Gemm_output_0[] = {1, 128};
Qnn_Tensor_t outputs__predictor_layers_0_Gemm[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_predictor_layers_0_Gemm_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000465237062599f, .offset= -31789}}},
.rank= 2,
.dimensions=dimensions__predictor_layers_0_Gemm_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_predictor_layers_0_Gemm", // Node Name
"qti.aisw", // Package Name
"FullyConnected", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__predictor_layers_0_Gemm, // Input Tensor Names
3, // Num Input Tensor Names
outputs__predictor_layers_0_Gemm, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the single-element PRelu slope tensor as STATIC UFIXED_POINT_16 data from
// the companion .bin. Offset 0 with a tiny scale: the quantized grid covers only the
// small positive slope value.
static ModelError_t addTensor_tts_dp_predictor_activation_weight(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_predictor_activation_weight[] = {1};
VALIDATE(model.addTensor("tts_dp_predictor_activation_weight", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_dp_predictor_activation_weight",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000067460686f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions_tts_dp_predictor_activation_weight,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_dp_predictor_activation_weight), .dataSize=BINLEN(tts_dp_predictor_activation_weight)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// Applies QNN Prelu to the first Gemm output using the scalar slope tensor above;
// output keeps the 1x128 shape with its own calibrated encoding.
static ModelError_t addNode__predictor_activation_PRelu(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _predictor_activation_PRelu */
const char* inputs__predictor_activation_PRelu[] = {
"_predictor_layers_0_Gemm_output_0",
"tts_dp_predictor_activation_weight"
};
uint32_t dimensions__predictor_activation_PRelu_output_0[] = {1, 128};
Qnn_Tensor_t outputs__predictor_activation_PRelu[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_predictor_activation_PRelu_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000239663895627f, .offset= -27}}},
.rank= 2,
.dimensions=dimensions__predictor_activation_PRelu_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_predictor_activation_PRelu", // Node Name
"qti.aisw", // Package Name
"Prelu", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__predictor_activation_PRelu, // Input Tensor Names
2, // Num Input Tensor Names
outputs__predictor_activation_PRelu, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Registers the 1x128 weight of the final projection (layers_1) as STATIC
// UFIXED_POINT_8 data from the companion .bin (symbol-name coupled via BINVARSTART).
static ModelError_t addTensor_tts_dp_predictor_layers_1_weight_permute(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_predictor_layers_1_weight_permute[] = {1, 128};
VALIDATE(model.addTensor("tts_dp_predictor_layers_1_weight_permute", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_dp_predictor_layers_1_weight_permute",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021564606577158f, .offset= -124}}},
.rank= 2,
.dimensions=dimensions_tts_dp_predictor_layers_1_weight_permute,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_dp_predictor_layers_1_weight_permute), .dataSize=BINLEN(tts_dp_predictor_layers_1_weight_permute)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// Registers the scalar bias of the final projection as STATIC UFIXED_POINT_8 data.
static ModelError_t addTensor_tts_dp_predictor_layers_1_bias(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
uint32_t dimensions_tts_dp_predictor_layers_1_bias[] = {1};
VALIDATE(model.addTensor("tts_dp_predictor_layers_1_bias", // Tensor Name
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "tts_dp_predictor_layers_1_bias",
.type= QNN_TENSOR_TYPE_STATIC,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_8,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004658546531573f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions_tts_dp_predictor_layers_1_bias,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=BINVARSTART(tts_dp_predictor_layers_1_bias), .dataSize=BINLEN(tts_dp_predictor_layers_1_bias)}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
), err);
return err;
}

// Final projection: FullyConnected collapsing 1x128 activations to the single
// log-duration logit (1x1).
static ModelError_t addNode__predictor_layers_1_Gemm(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _predictor_layers_1_Gemm */
const char* inputs__predictor_layers_1_Gemm[] = {
"_predictor_activation_PRelu_output_0",
"tts_dp_predictor_layers_1_weight_permute",
"tts_dp_predictor_layers_1_bias"
};
uint32_t dimensions__predictor_layers_1_Gemm_output_0[] = {1, 1};
Qnn_Tensor_t outputs__predictor_layers_1_Gemm[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_predictor_layers_1_Gemm_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000159196151799f, .offset= 0}}},
.rank= 2,
.dimensions=dimensions__predictor_layers_1_Gemm_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_predictor_layers_1_Gemm", // Node Name
"qti.aisw", // Package Name
"FullyConnected", // Qnn Node Type
nullptr, // Node Params
0, // Num Node Params
inputs__predictor_layers_1_Gemm, // Input Tensor Names
3, // Num Input Tensor Names
outputs__predictor_layers_1_Gemm, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// Lowers ONNX Exp to QNN ElementWiseUnary (scalar param "operation"=5; given the source
// node is an Exp this is presumably the exp opcode from QnnOpDef.h -- confirm against
// the SDK headers). Converts the log-duration logit to a positive duration.
static ModelError_t addNode__predictor_Exp(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _predictor_Exp */
Qnn_Param_t params__predictor_Exp[] = {
{.paramType=QNN_PARAMTYPE_SCALAR, .name="operation", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 5}}}}
};
const char* inputs__predictor_Exp[] = {
"_predictor_layers_1_Gemm_output_0"
};
uint32_t dimensions__predictor_Exp_output_0[] = {1, 1};
Qnn_Tensor_t outputs__predictor_Exp[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "_predictor_Exp_output_0",
.type= QNN_TENSOR_TYPE_NATIVE,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000433134373452f, .offset= 0}}},
.rank= 2,
.dimensions=dimensions__predictor_Exp_output_0,
.memType= QNN_TENSORMEMTYPE_RAW,
{.clientBuf= { .data=nullptr, .dataSize=0}},
.isDynamicDimensions= nullptr,
.sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}},
.isProduced= 0}}}
};
VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
"_predictor_Exp", // Node Name
"qti.aisw", // Package Name
"ElementWiseUnary", // Qnn Node Type
params__predictor_Exp, // Node Params
1, // Num Node Params
inputs__predictor_Exp, // Input Tensor Names
1, // Num Input Tensor Names
outputs__predictor_Exp, // Output Tensors
1// Num Output Tensors
), err);
return err;
}

// ONNX Squeeze lowered as a Reshape (1x1 -> rank-1 {1}). Produces the graph output
// "duration" (APP_READ: readable by the application), with the same encoding as the
// Exp output. The registration continues on the following line of the file.
static ModelError_t addNode__predictor_Squeeze(QnnModel& model){
ModelError_t err = MODEL_NO_ERROR;
/* ADDING NODE FOR _predictor_Squeeze */
const char* inputs__predictor_Squeeze[] = {
"_predictor_Exp_output_0"
};
uint32_t dimensions_duration[] = {1};
Qnn_Tensor_t outputs__predictor_Squeeze[] = {
(Qnn_Tensor_t) {
.version= QNN_TENSOR_VERSION_2,
{.v2= {
.id=0,
.name= "duration",
.type= QNN_TENSOR_TYPE_APP_READ,
.dataFormat= QNN_TENSOR_DATA_FORMAT_DENSE,
.dataType= QNN_DATATYPE_UFIXED_POINT_16,
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000433134373452f, .offset= 0}}},
.rank= 1,
.dimensions=dimensions_duration,
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}, .isDynamicDimensions= nullptr, .sparseParams= { QNN_SPARSE_LAYOUT_UNDEFINED, .hybridCoo= {.numSpecifiedElements= 0, .numSparseDimensions= 0}}, .isProduced= 0}}} }; VALIDATE(model.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_predictor_Squeeze", // Node Name "qti.aisw", // Package Name "Reshape", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__predictor_Squeeze, // Input Tensor Names 1, // Num Input Tensor Names outputs__predictor_Squeeze, // Output Tensors 1// Num Output Tensors ), err); return err; } QNN_API ModelError_t QnnModel_composeGraphs(Qnn_BackendHandle_t backendHandle, QNN_INTERFACE_VER_TYPE interface, Qnn_ContextHandle_t contextHandle, const GraphConfigInfo_t** graphsConfigInfo, const uint32_t numGraphsConfigInfo, GraphInfoPtr_t** graphsInfo, uint32_t* numGraphsInfo, bool debug, QnnLog_Callback_t logCallback, QnnLog_Level_t maxLogLevel) { ModelError_t err = MODEL_NO_ERROR; /* model/graph for duration_predictor_htp*/ QnnModel duration_predictor_htp; const QnnGraph_Config_t** graphConfigs = nullptr; VALIDATE(getQnnGraphConfigFromInfo("duration_predictor_htp", graphsConfigInfo, numGraphsConfigInfo, graphConfigs), err); VALIDATE(duration_predictor_htp.initialize(backendHandle, interface, contextHandle, "duration_predictor_htp", debug, DO_GRAPH_NODE_VALIDATIONS, graphConfigs), err); VALIDATE(addTensor_text_ids(duration_predictor_htp), err); VALIDATE(addTensor_style_dp(duration_predictor_htp), err); VALIDATE(addTensor_text_mask(duration_predictor_htp), err); VALIDATE(addNode_style_dp_ncf(duration_predictor_htp), err); VALIDATE(addNode_text_mask_ncf(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_text_embedder_char_embedder_weight(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_text_embedder_char_embedder_Gather(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_text_embedder_Transpose(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_text_embedder_Mul(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_text_embedder_Mul_output_0_nfc(duration_predictor_htp), err); VALIDATE(addTensor__sentence_encoder_ConstantOfShape_1_output_0(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_Concat_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_Concat_2_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__predictor_Reshape_1(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_Unsqueeze(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_Unsqueeze_1(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_Slice_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_Mul(duration_predictor_htp), err); VALIDATE(addTensor__sentence_encoder_attn_encoder_attn_layers_0_Constant_87_output_0(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Equal(duration_predictor_htp), err); VALIDATE(addTensor__sentence_encoder_Expand_output_0(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_Concat_1(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_Mul(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_dwconv_Pad(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_dwconv_Pad_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_dwconv_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_0_dwconv_weight(duration_predictor_htp), err); 
VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_0_dwconv_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_dwconv_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_dwconv_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_dwconv_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_Mul_1(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_0_norm_norm_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_norm_norm_LayerNormalization(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_norm_Transpose_1_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_pwconv1_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_0_pwconv1_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_pwconv1_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_pwconv1_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__elementwiseneuron_2(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_pwconv2_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_0_pwconv2_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_pwconv2_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_pwconv2_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_pwconv2_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_0_gamma(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_Mul_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_Add(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_0_Mul_3(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_Mul(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_dwconv_Pad(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_dwconv_Pad_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_dwconv_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_1_dwconv_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_1_dwconv_bias(duration_predictor_htp), 
err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_dwconv_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_dwconv_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_dwconv_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_Mul_1(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_1_norm_norm_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_norm_norm_LayerNormalization(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_norm_Transpose_1_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_pwconv1_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_1_pwconv1_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_pwconv1_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_pwconv1_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__elementwiseneuron_4(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_convnext_convnext_1_pwconv2_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_1_pwconv2_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_pwconv2_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_pwconv2_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_pwconv2_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_1_gamma(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_Mul_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_Add(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_1_Mul_3(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_Mul(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_dwconv_Pad(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_dwconv_Pad_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_dwconv_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_2_dwconv_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_2_dwconv_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_dwconv_Conv_2d(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_dwconv_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_dwconv_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_Mul_1(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_2_norm_norm_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_norm_norm_LayerNormalization(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_norm_Transpose_1_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_pwconv1_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_2_pwconv1_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_pwconv1_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_pwconv1_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__elementwiseneuron_6(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); 
VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_2_pwconv2_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_pwconv2_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_2_gamma(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_Mul_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_Add(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_2_Mul_3(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_Mul(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_dwconv_Pad(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_dwconv_Pad_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_dwconv_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_3_dwconv_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_3_dwconv_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_dwconv_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate_nchw(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_convnext_convnext_3_dwconv_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_dwconv_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_Mul_1(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_3_norm_norm_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_norm_norm_LayerNormalization(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_norm_Transpose_1_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_pwconv1_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_3_pwconv1_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_pwconv1_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_pwconv1_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__elementwiseneuron_8(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_pwconv2_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_weight(duration_predictor_htp), err); 
VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_3_pwconv2_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_pwconv2_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_pwconv2_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_pwconv2_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_3_gamma(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_Mul_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_Add(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_3_Mul_3(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_Mul(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_dwconv_Pad(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_dwconv_Pad_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_dwconv_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_4_dwconv_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_4_dwconv_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_dwconv_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_dwconv_Conv_intermediate(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_convnext_convnext_4_dwconv_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_Mul_1(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_4_norm_norm_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_norm_norm_LayerNormalization(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_norm_Transpose_1_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_pwconv1_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_4_pwconv1_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_pwconv1_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_pwconv1_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__elementwiseneuron_10(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_pwconv2_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_4_pwconv2_bias(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_convnext_convnext_4_pwconv2_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_pwconv2_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_pwconv2_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_4_gamma(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_Mul_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_Add(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_4_Mul_3(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_Mul(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_dwconv_Pad(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_dwconv_Pad_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_dwconv_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_5_dwconv_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_5_dwconv_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_dwconv_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_dwconv_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_dwconv_Conv_output_0_nfc(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_convnext_convnext_5_Mul_1(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_5_norm_norm_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_norm_norm_LayerNormalization(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_norm_Transpose_1_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_pwconv1_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_5_pwconv1_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_pwconv1_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_pwconv1_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__elementwiseneuron_12(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_pwconv2_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_5_pwconv2_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_pwconv2_Conv_2d(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_pwconv2_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_pwconv2_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_convnext_convnext_5_gamma(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_Mul_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_Add(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_convnext_convnext_5_Mul_3(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_Mul_1(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_Mul_1_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_q_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_q_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); 
VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_k_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_k_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_1_output_0_converted_UFIXED_POINT_8(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_v_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_v_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Transpose(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Transpose_1_output_0_converted_UFIXED_POINT_8(duration_predictor_htp), err); VALIDATE(addTensor__sentence_encoder_attn_encoder_attn_layers_0_Constant_10_output_0(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Div(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_MatMul(duration_predictor_htp), err); VALIDATE(addTensor__sentence_encoder_attn_encoder_attn_layers_0_Transpose_3_output_0_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_MatMul_1_output_0_nhwc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_1(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_1_output_0_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_7_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_2_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_10(duration_predictor_htp), err); VALIDATE(addNode_Slice_0(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Add_2(duration_predictor_htp), err); VALIDATE(addTensor__sentence_encoder_attn_encoder_attn_layers_0_Constant_88_output_0(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Where(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Softmax(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Softmax_output_0_nhwc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_MatMul_2(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_3(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_3_output_0_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_13_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_4(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Pad_4_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_16(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Slice_8(duration_predictor_htp), err); VALIDATE(addTensor__sentence_encoder_attn_encoder_attn_layers_0_Unsqueeze_41_output_0_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_MatMul_3(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Add_4(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Transpose_9(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_Reshape_19(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_0_conv_o_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate_nchw(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_0_conv_o_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_Add(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_0_norm_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_norm_layers_1_0_norm_LayerNormalization(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_Mul(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_Mul_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_1_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_1_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_Relu(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_Relu_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_Mul_1_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_0_conv_2_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_conv_2_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_0_Mul_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_Add_1(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_0_norm_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_norm_layers_2_0_norm_LayerNormalization(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_norm_layers_2_0_Transpose_1_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); 
VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_q_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_q_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_k_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_k_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_1_output_0_converted_UFIXED_POINT_8(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_v_bias(duration_predictor_htp), 
err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_v_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Transpose(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Transpose_1_output_0_converted_UFIXED_POINT_8(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Div(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_MatMul(duration_predictor_htp), err); VALIDATE(addTensor__sentence_encoder_attn_encoder_attn_layers_1_Transpose_3_output_0_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_MatMul_1_output_0_nhwc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_1(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_1_output_0_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_7_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_2_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_10(duration_predictor_htp), err); 
VALIDATE(addNode_Slice_1(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Add_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Where(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Softmax(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Softmax_output_0_nhwc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_MatMul_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_3(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_3_output_0_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_13_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_4(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Pad_4_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_16(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Slice_8(duration_predictor_htp), err); VALIDATE(addTensor__sentence_encoder_attn_encoder_attn_layers_1_Unsqueeze_41_output_0_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_MatMul_3(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Add_4(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Transpose_9(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_Reshape_19(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_attn_layers_1_conv_o_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_attn_layers_1_conv_o_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_Add_2(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_1_1_norm_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_norm_layers_1_1_norm_LayerNormalization(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_Mul(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_Mul_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_1_bias(duration_predictor_htp), err); 
VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_1_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_Relu(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_Relu_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_Mul_1_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_weight(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_ffn_layers_1_conv_2_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_conv_2_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_ffn_layers_1_Mul_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_Add_3(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_weight(duration_predictor_htp), err); 
VALIDATE(addTensor_tts_dp_sentence_encoder_attn_encoder_norm_layers_2_1_norm_bias(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_norm_layers_2_1_norm_LayerNormalization(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_attn_encoder_Mul_2(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_Add(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_Slice_1(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_Slice_1_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_proj_out_net_Conv_reshape_to_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_proj_out_net_Conv_reshape_to_2d_nhwc(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_sentence_encoder_proj_out_net_weight(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_proj_out_net_Conv_2d(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_proj_out_net_Conv_intermediate_nchw(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_proj_out_net_Conv_intermediate(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_proj_out_net_Conv_output_0_nfc(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_proj_out_Mul(duration_predictor_htp), err); VALIDATE(addNode__sentence_encoder_proj_out_Mul_output_0_ncf(duration_predictor_htp), err); VALIDATE(addNode__predictor_Reshape(duration_predictor_htp), err); VALIDATE(addNode__predictor_Concat_2(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_predictor_layers_0_weight_permute(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_predictor_layers_0_bias(duration_predictor_htp), err); VALIDATE(addNode__predictor_layers_0_Gemm(duration_predictor_htp), err); VALIDATE(addTensor_tts_dp_predictor_activation_weight(duration_predictor_htp), err); VALIDATE(addNode__predictor_activation_PRelu(duration_predictor_htp), err); 
// Final predictor head: second linear layer (weight pre-permuted by the
// converter for Gemm), Exp to map log-durations back to durations, and a
// trailing Squeeze producing the graph output ("duration" per the converter
// command line in the file header).
VALIDATE(addTensor_tts_dp_predictor_layers_1_weight_permute(duration_predictor_htp), err);
VALIDATE(addTensor_tts_dp_predictor_layers_1_bias(duration_predictor_htp), err);
VALIDATE(addNode__predictor_layers_1_Gemm(duration_predictor_htp), err);
VALIDATE(addNode__predictor_Exp(duration_predictor_htp), err);
VALIDATE(addNode__predictor_Squeeze(duration_predictor_htp), err);

// Add all models to array to get graphsInfo
QnnModel* models [] = {&duration_predictor_htp};
uint32_t numModels = 1;

// Populate the constructed graphs in provided output variables
// (graphsInfo / numGraphsInfo are out-parameters of the enclosing
// PREPARE_GRAPHS function, whose signature is above this chunk).
VALIDATE(getGraphInfoFromModels(*models, numModels, graphsInfo), err);
*numGraphsInfo = numModels;

// MODEL_NO_ERROR unless a VALIDATE above short-circuited with a failure code.
return err;
} // PREPARE_GRAPHS

/// @brief Releases the graph metadata previously populated by the prepare
///        step via getGraphInfoFromModels. Thin extern "C" shim over the
///        qnn_wrapper_api helper so the symbol is callable from C loaders.
/// @param graphsInfo    Address of the graph-info array to free; the wrapper
///                      is expected to null it out (behavior lives in
///                      qnn_wrapper_api::freeGraphsInfo — confirm there).
/// @param numGraphsInfo Number of entries in the array.
/// @return Forwarded ModelError_t from the wrapper implementation.
QNN_API ModelError_t QnnModel_freeGraphsInfo(GraphInfoPtr_t** graphsInfo, uint32_t numGraphsInfo){
  return qnn_wrapper_api::freeGraphsInfo(graphsInfo, numGraphsInfo);
} // FREEGRAPHINFO

} // extern "C"